diff --git a/.circleci/config.yml b/.circleci/config.yml
index 64da19e34..a9bb28b98 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -6,9 +6,17 @@ orbs:
executors:
golang:
docker:
- # Must match GO_VERSION_MIN in project root. Change in gen.go
- - image: cimg/go:1.19.7
+ # Must match GO_VERSION_MIN in project root
+ - image: cimg/go:1.20.7
resource_class: medium+
+ golang-2xl:
+ docker:
+ # Must match GO_VERSION_MIN in project root
+ - image: cimg/go:1.20.7
+ resource_class: 2xlarge
+ ubuntu:
+ docker:
+ - image: ubuntu:20.04
commands:
build-platform-specific:
@@ -62,8 +70,6 @@ commands:
name: Restore parameters cache
keys:
- 'v26-2k-lotus-params'
- paths:
- - /var/tmp/filecoin-proof-parameters/
- run: ./lotus fetch-params 2048
- save_cache:
name: Save parameters cache
@@ -158,10 +164,10 @@ jobs:
default: unit
description: Test suite name to report to CircleCI.
docker:
- - image: cimg/go:1.19.7
+ - image: cimg/go:1.20
environment:
LOTUS_HARMONYDB_HOSTS: yugabyte
- - image: yugabytedb/yugabyte:latest
+ - image: yugabytedb/yugabyte:2.18.0.0-b65
command: bin/yugabyted start --daemon=false
name: yugabyte
resource_class: << parameters.resource_class >>
@@ -212,7 +218,7 @@ jobs:
test with. If empty (the default) the commit defined by the git
submodule is used.
docker:
- - image: cimg/go:1.19.7
+ - image: cimg/go:1.20
resource_class: << parameters.resource_class >>
steps:
- install-ubuntu-deps
@@ -406,7 +412,7 @@ jobs:
description: |
Arguments to pass to golangci-lint
docker:
- - image: cimg/go:1.19.7
+ - image: cimg/go:1.20
resource_class: medium+
steps:
- install-ubuntu-deps
@@ -548,12 +554,6 @@ workflows:
- build
suite: itest-batch_deal
target: "./itests/batch_deal_test.go"
- - test:
- name: test-itest-ccupgrade
- requires:
- - build
- suite: itest-ccupgrade
- target: "./itests/ccupgrade_test.go"
- test:
name: test-itest-cli
requires:
@@ -891,12 +891,6 @@ workflows:
- build
suite: itest-remove_verifreg_datacap
target: "./itests/remove_verifreg_datacap_test.go"
- - test:
- name: test-itest-sdr_upgrade
- requires:
- - build
- suite: itest-sdr_upgrade
- target: "./itests/sdr_upgrade_test.go"
- test:
name: test-itest-sealing_resources
requires:
@@ -921,12 +915,6 @@ workflows:
- build
suite: itest-sector_import_simple
target: "./itests/sector_import_simple_test.go"
- - test:
- name: test-itest-sector_make_cc_avail
- requires:
- - build
- suite: itest-sector_make_cc_avail
- target: "./itests/sector_make_cc_avail_test.go"
- test:
name: test-itest-sector_miner_collateral
requires:
@@ -945,18 +933,6 @@ workflows:
- build
suite: itest-sector_pledge
target: "./itests/sector_pledge_test.go"
- - test:
- name: test-itest-sector_prefer_no_upgrade
- requires:
- - build
- suite: itest-sector_prefer_no_upgrade
- target: "./itests/sector_prefer_no_upgrade_test.go"
- - test:
- name: test-itest-sector_revert_available
- requires:
- - build
- suite: itest-sector_revert_available
- target: "./itests/sector_revert_available_test.go"
- test:
name: test-itest-sector_terminate
requires:
@@ -981,12 +957,6 @@ workflows:
- build
suite: itest-splitstore
target: "./itests/splitstore_test.go"
- - test:
- name: test-itest-tape
- requires:
- - build
- suite: itest-tape
- target: "./itests/tape_test.go"
- test:
name: test-itest-verifreg
requires:
@@ -1058,8 +1028,8 @@ workflows:
requires:
- build
suite: utest-unit-rest
- target: "./api/... ./blockstore/... ./build/... ./chain/... ./cli/... ./cmd/... ./conformance/... ./extern/... ./gateway/... ./journal/... ./lib/... ./markets/... ./node/... ./paychmgr/... ./storage/... ./tools/..."
- resource_class: 2xlarge
+ target: "./blockstore/... ./build/... ./chain/... ./conformance/... ./gateway/... ./journal/... ./lib/... ./markets/... ./paychmgr/... ./tools/..."
+
- test:
name: test-unit-storage
requires:
diff --git a/.circleci/gen.go b/.circleci/gen.go
index d85b15d90..19329247a 100644
--- a/.circleci/gen.go
+++ b/.circleci/gen.go
@@ -10,13 +10,25 @@ import (
"text/template"
)
-const GoVersion = "1.19.7"
+var GoVersion = "" // from init below. Ex: 1.19.7
//go:generate go run ./gen.go ..
//go:embed template.yml
var templateFile embed.FS
+func init() {
+ b, err := os.ReadFile("../go.mod")
+ if err != nil {
+ panic("cannot find go.mod in parent folder")
+ }
+ for _, line := range strings.Split(string(b), "\n") {
+ if strings.HasPrefix(line, "go ") {
+ GoVersion = line[3:]
+ }
+ }
+}
+
type (
dirs = []string
suite = string
@@ -67,6 +79,8 @@ func main() {
if err != nil {
panic(err)
}
+ // Redundantly flag both absolute and relative paths as excluded
+ excluded[filepath.Join(repo, s)] = struct{}{}
excluded[e] = struct{}{}
}
}
diff --git a/.circleci/template.yml b/.circleci/template.yml
index d8eeb6048..89714308f 100644
--- a/.circleci/template.yml
+++ b/.circleci/template.yml
@@ -6,9 +6,17 @@ orbs:
executors:
golang:
docker:
- # Must match GO_VERSION_MIN in project root. Change in gen.go
- - image: cimg/go:[[ .GoVersion]]
+ # Must match GO_VERSION_MIN in project root
+ - image: cimg/go:1.20.7
resource_class: medium+
+ golang-2xl:
+ docker:
+ # Must match GO_VERSION_MIN in project root
+ - image: cimg/go:1.20.7
+ resource_class: 2xlarge
+ ubuntu:
+ docker:
+ - image: ubuntu:20.04
commands:
build-platform-specific:
@@ -62,8 +70,6 @@ commands:
name: Restore parameters cache
keys:
- 'v26-2k-lotus-params'
- paths:
- - /var/tmp/filecoin-proof-parameters/
- run: ./lotus fetch-params 2048
- save_cache:
name: Save parameters cache
@@ -161,7 +167,7 @@ jobs:
- image: cimg/go:[[ .GoVersion]]
environment:
LOTUS_HARMONYDB_HOSTS: yugabyte
- - image: yugabytedb/yugabyte:latest
+ - image: yugabytedb/yugabyte:2.18.0.0-b65
command: bin/yugabyted start --daemon=false
name: yugabyte
resource_class: << parameters.resource_class >>
@@ -561,7 +567,6 @@ workflows:
suite: utest-[[ $suite ]]
target: "[[ $pkgs ]]"
[[if eq $suite "unit-cli"]]get-params: true[[end]]
- [[- if eq $suite "unit-rest"]]resource_class: 2xlarge[[end]]
[[- end]]
- test:
go-test-flags: "-run=TestMulticoreSDR"
diff --git a/.github/ISSUE_TEMPLATE/task.md b/.github/ISSUE_TEMPLATE/task.md
new file mode 100644
index 000000000..205c82770
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/task.md
@@ -0,0 +1,31 @@
+---
+name: New Task
+about: A larger yet well-scoped task
+title: ''
+labels: Needs Triage
+assignees: ''
+
+---
+
+## User Story
+
+
+## Acceptance Criteria
+
+
+
+```[tasklist]
+### Deliverables
+
+```
+
+## Technical Breakdown
+```[tasklist]
+### Development
+
+```
+
+```[tasklist]
+### Testing
+
+```
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index b6ef5fa3c..b5843c5b3 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -16,6 +16,7 @@ Before you mark the PR ready for review, please make sure that:
- example: ` fix: mempool: Introduce a cache for valid signatures`
- `PR type`: fix, feat, build, chore, ci, docs, perf, refactor, revert, style, test
- `area`, e.g. api, chain, state, market, mempool, multisig, networking, paych, proving, sealing, wallet, deps
+- [ ] If the PR affects users (e.g., new feature, bug fix, system requirements change), update the CHANGELOG.md and add details to the UNRELEASED section.
- [ ] New features have usage guidelines and / or documentation updates in
- [ ] [Lotus Documentation](https://lotus.filecoin.io)
- [ ] [Discussion Tutorials](https://github.com/filecoin-project/lotus/discussions/categories/tutorials)
diff --git a/.golangci.yml b/.golangci.yml
index fe663ef7b..1d455e525 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -12,9 +12,9 @@ linters:
- unconvert
- staticcheck
- varcheck
- - structcheck
- deadcode
- scopelint
+ - unused
# We don't want to skip builtin/
skip-dirs-use-default: false
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 64fe60c5a..9076e978f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,9 +3,111 @@
# UNRELEASED
## New features
-- feat: Added new environment variable `LOTUS_EXEC_TRACE_CACHE_SIZE` to configure execution trace cache size ([filecoin-project/lotus#10585](https://github.com/filecoin-project/lotus/pull/10585))
- - If unset, we default to caching 16 most recent execution traces. Node operatores may want to set this to 0 while exchanges may want to crank it up.
+- feat: Added new tracing API (**HIGHLY EXPERIMENTAL**) supporting two RPC methods: `trace_block` and `trace_replayBlockTransactions` ([filecoin-project/lotus#11100](https://github.com/filecoin-project/lotus/pull/11100))
+# v1.23.3 / 2023-08-01
+
+This feature release of Lotus includes numerous improvements and enhancements for node operators, ETH RPC-providers and storage providers.
+
+This feature release requires a **minimum Go version of v1.19.12 or higher to successfully build Lotus**. Go version 1.20 is also supported, but 1.21 is NOT.
+
+## Highlights
+
+- [Lotus now includes a Slasher tool](https://github.com/filecoin-project/lotus/pull/10928) to monitor the network for Consensus Faults, and report them as appropriate
+ - The Slasher investigates all incoming blocks, and assesses whether they trigger any of the three Consensus Faults defined in the Filecoin protocol
+ - If any faults are detected, the Slasher sends a `ReportConsensusFault` message to the faulty miner
+ - For more information on the Slasher, including how to run it, please find the documentation [here](https://lotus.filecoin.io/lotus/manage/slasher-and-disputer/)
+- The Ethereum-like RPC exposed by Lotus is now compatible with EIP-1898: https://github.com/filecoin-project/lotus/pull/10815
+- The lotus-miner PieceReader now supports parallel reads: https://github.com/filecoin-project/lotus/pull/10913
+- Added new environment variable `LOTUS_EXEC_TRACE_CACHE_SIZE` to configure execution trace cache size ([filecoin-project/lotus#10585](https://github.com/filecoin-project/lotus/pull/10585))
+ - If unset, we default to caching 16 most recent execution traces. Storage Providers may want to set this to 0, while exchanges may want to crank it up.
+
+## New features
+ - feat: miner cli: sectors list upgrade-bounds tool ([filecoin-project/lotus#10923](https://github.com/filecoin-project/lotus/pull/10923))
+ - Add new RPC stress testing tool (lotus-bench rpc) with rich reporting ([filecoin-project/lotus#10761](https://github.com/filecoin-project/lotus/pull/10761))
+ - feat: alert: Add FVM_CONCURRENCY alert ([filecoin-project/lotus#10933](https://github.com/filecoin-project/lotus/pull/10933))
+ - feat: Add eth_syncing RPC method ([filecoin-project/lotus#10719](https://github.com/filecoin-project/lotus/pull/10719))
+ - feat: sealing: flag to run data_cid untied from addpiece ([filecoin-project/lotus#10797](https://github.com/filecoin-project/lotus/pull/10797))
+ - feat: Lotus Gateway: add MpoolPending, ChainGetBlock and MinerGetBaseInfo ([filecoin-project/lotus#10929](https://github.com/filecoin-project/lotus/pull/10929))
+
+## Improvements
+ - chore: update ffi & fvm ([filecoin-project/lotus#11040](https://github.com/filecoin-project/lotus/pull/11040))
+ - feat: Make sure we don't store duplicate actor events caused by reorgs in events.db ([filecoin-project/lotus#11015](https://github.com/filecoin-project/lotus/pull/11015))
+ - sealing: Use only non-assigned deals when selecting snap sectors ([filecoin-project/lotus#11002](https://github.com/filecoin-project/lotus/pull/11002))
+ - chore: not display privatekey ([filecoin-project/lotus#11006](https://github.com/filecoin-project/lotus/pull/11006))
+ - chore: shed: update actor version ([filecoin-project/lotus#11020](https://github.com/filecoin-project/lotus/pull/11020))
+ - chore: migrate to boxo ([filecoin-project/lotus#10921](https://github.com/filecoin-project/lotus/pull/10921))
+ - feat: deflake TestDealsWithFinalizeEarly ([filecoin-project/lotus#10978](https://github.com/filecoin-project/lotus/pull/10978))
+ - fix: pubsub: do not treat ErrExistingNonce as Reject ([filecoin-project/lotus#10973](https://github.com/filecoin-project/lotus/pull/10973))
+ - feat: deflake TestDMLevelPartialRetrieval (#10972) ([filecoin-project/lotus#10972](https://github.com/filecoin-project/lotus/pull/10972))
+ - fix: eth: ensure that the event topics are non-nil ([filecoin-project/lotus#10971](https://github.com/filecoin-project/lotus/pull/10971))
+ - Add comment stating msgIndex is an experimental feature ([filecoin-project/lotus#10968](https://github.com/filecoin-project/lotus/pull/10968))
+ - feat: cli(compute-state) default to the tipset at the given epoch ([filecoin-project/lotus#10965](https://github.com/filecoin-project/lotus/pull/10965))
+ - Upgrade urfave dependency which now supports DisableSliceFlagSeparato… ([filecoin-project/lotus#10950](https://github.com/filecoin-project/lotus/pull/10950))
+ - Add new lotus-shed command for computing eth hash for a given message cid (#10961) ([filecoin-project/lotus#10961](https://github.com/filecoin-project/lotus/pull/10961))
+ - Prefill GetTipsetByHeight skiplist cache on lotus startup ([filecoin-project/lotus#10955](https://github.com/filecoin-project/lotus/pull/10955))
+ - Add lotus-shed command for backfilling txhash.db ([filecoin-project/lotus#10932](https://github.com/filecoin-project/lotus/pull/10932))
+ - chore: deps: update to go-libp2p 0.27.5 ([filecoin-project/lotus#10948](https://github.com/filecoin-project/lotus/pull/10948))
+ - Small improvement to make gen output ([filecoin-project/lotus#10951](https://github.com/filecoin-project/lotus/pull/10951))
+ - fix: improve perf of msgindex backfill ([filecoin-project/lotus#10941](https://github.com/filecoin-project/lotus/pull/10941))
+ - deps: update libp2p ([filecoin-project/lotus#10936](https://github.com/filecoin-project/lotus/pull/10936))
+ - sealing: Improve upgrade sector selection ([filecoin-project/lotus#10915](https://github.com/filecoin-project/lotus/pull/10915))
+ - Add timing test for mpool select with a large mpool dump ([filecoin-project/lotus#10650](https://github.com/filecoin-project/lotus/pull/10650))
+ - feat: slashfilter: drop outdated near-upgrade check ([filecoin-project/lotus#10925](https://github.com/filecoin-project/lotus/pull/10925))
+ - opt: MinerInfo adds the PendingOwnerAddress field ([filecoin-project/lotus#10927](https://github.com/filecoin-project/lotus/pull/10927))
+ - feat: itest: force PoSt more aggressively around deadline closure ([filecoin-project/lotus#10926](https://github.com/filecoin-project/lotus/pull/10926))
+ - test: messagepool: gas rewards are negative if GasFeeCap too low ([filecoin-project/lotus#10649](https://github.com/filecoin-project/lotus/pull/10649))
+ - fix: types: error out on decoding BlockMsg with extraneous data ([filecoin-project/lotus#10863](https://github.com/filecoin-project/lotus/pull/10863))
+ - update interop upgrade schedule ([filecoin-project/lotus#10879](https://github.com/filecoin-project/lotus/pull/10879))
+ - itests: Test PoSt V1_1 on workers ([filecoin-project/lotus#10732](https://github.com/filecoin-project/lotus/pull/10732))
+ - Update gas_balancing.md ([filecoin-project/lotus#10924](https://github.com/filecoin-project/lotus/pull/10924))
+ - feat: cli: Make compact partitions cmd better ([filecoin-project/lotus#9070](https://github.com/filecoin-project/lotus/pull/9070))
+ - fix: include extra messages in ComputeState InvocResult output ([filecoin-project/lotus#10628](https://github.com/filecoin-project/lotus/pull/10628))
+ - feat: pubsub: treat ErrGasFeeCapTooLow as ignore, not reject ([filecoin-project/lotus#10652](https://github.com/filecoin-project/lotus/pull/10652))
+ - feat: run lotus-shed commands in context that is cancelled on sigterm ([filecoin-project/lotus#10877](https://github.com/filecoin-project/lotus/pull/10877))
+ - fix:lotus-fountain:set default data-cap same as MinVerifiedDealSize ([filecoin-project/lotus#10920](https://github.com/filecoin-project/lotus/pull/10920))
+ - pass the right g-recaptcha data
+ - fix: not call RUnlock ([filecoin-project/lotus#10912](https://github.com/filecoin-project/lotus/pull/10912))
+ - opt: cli: If present, print Events Root ([filecoin-project/lotus#10893](https://github.com/filecoin-project/lotus/pull/10893))
+ - Calibration faucet UI improvements ([filecoin-project/lotus#10905](https://github.com/filecoin-project/lotus/pull/10905))
+ - chore: chain: replace storetheindex with go-libipni ([filecoin-project/lotus#10841](https://github.com/filecoin-project/lotus/pull/10841))
+ - Add alerts to `Lotus info` cmd ([filecoin-project/lotus#10894](https://github.com/filecoin-project/lotus/pull/10894))
+ - fix: cli: make redeclare cmd work properly ([filecoin-project/lotus#10860](https://github.com/filecoin-project/lotus/pull/10860))
+ - fix: shed remove datacap not working with ledger ([filecoin-project/lotus#10880](https://github.com/filecoin-project/lotus/pull/10880))
+ - Check if epoch is negative in GetTipsetByHeight ([filecoin-project/lotus#10878](https://github.com/filecoin-project/lotus/pull/10878))
+ - chore: update go-fil-markets ([filecoin-project/lotus#10867](https://github.com/filecoin-project/lotus/pull/10867))
+ - feat: alerts: Add lotus-miner legacy-markets alert ([filecoin-project/lotus#10868](https://github.com/filecoin-project/lotus/pull/10868))
+ - feat:fountain:add grant-datacap support ([filecoin-project/lotus#10856](https://github.com/filecoin-project/lotus/pull/10856))
+ - feat: itests: add logs to blockminer.go failure case ([filecoin-project/lotus#10861](https://github.com/filecoin-project/lotus/pull/10861))
+ - feat: eth: Add support for blockHash param in eth_getLogs ([filecoin-project/lotus#10782](https://github.com/filecoin-project/lotus/pull/10782))
+ - lotus-fountain: make compatible with 0x addresses #10560 ([filecoin-project/lotus#10784](https://github.com/filecoin-project/lotus/pull/10784))
+ - feat: deflake sector_import_simple ([filecoin-project/lotus#10858](https://github.com/filecoin-project/lotus/pull/10858))
+ - fix: splitstore: remove deadlock around waiting for sync ([filecoin-project/lotus#10857](https://github.com/filecoin-project/lotus/pull/10857))
+ - fix: sched: Address GET_32G_MAX_CONCURRENT regression (#10850) ([filecoin-project/lotus#10850](https://github.com/filecoin-project/lotus/pull/10850))
+ - feat: fix deadlock in splitstore-mpool interaction ([filecoin-project/lotus#10840](https://github.com/filecoin-project/lotus/pull/10840))
+ - chore: update go-libp2p to v0.27.3 ([filecoin-project/lotus#10671](https://github.com/filecoin-project/lotus/pull/10671))
+ - libp2p: add QUIC and WebTransport to default listen addresses ([filecoin-project/lotus#10848](https://github.com/filecoin-project/lotus/pull/10848))
+ - fix: ci: Debugging m1 build ([filecoin-project/lotus#10749](https://github.com/filecoin-project/lotus/pull/10749))
+ - Validate that FromBlock/ToBlock epoch is indeed a hex value (#10780) ([filecoin-project/lotus#10780](https://github.com/filecoin-project/lotus/pull/10780))
+ - fix: remove invalid field UpgradePriceListOopsHeight ([filecoin-project/lotus#10772](https://github.com/filecoin-project/lotus/pull/10772))
+ - feat: deflake eth_balance_test ([filecoin-project/lotus#10847](https://github.com/filecoin-project/lotus/pull/10847))
+ - fix: tests: Use mutex-wrapped datastore in storage tests ([filecoin-project/lotus#10846](https://github.com/filecoin-project/lotus/pull/10846))
+ - Make lotus-fountain UI slightly friendlier ([filecoin-project/lotus#10785](https://github.com/filecoin-project/lotus/pull/10785))
+ - Make (un)subscribe and filter RPC methods require only read perm ([filecoin-project/lotus#10825](https://github.com/filecoin-project/lotus/pull/10825))
+ - deps: Update go-jsonrpc to v0.3.1 ([filecoin-project/lotus#10845](https://github.com/filecoin-project/lotus/pull/10845))
+ - feat: deflake paych_api_test ([filecoin-project/lotus#10843](https://github.com/filecoin-project/lotus/pull/10843))
+ - fix: Eth RPC: do not occlude block param errors. ([filecoin-project/lotus#10534](https://github.com/filecoin-project/lotus/pull/10534))
+ - feat: cli: More ux-friendly batching cmds ([filecoin-project/lotus#10837](https://github.com/filecoin-project/lotus/pull/10837))
+ - fix: cli: Hide legacy markets cmds ([filecoin-project/lotus#10842](https://github.com/filecoin-project/lotus/pull/10842))
+ - feat: chainstore: exit early in MaybeTakeHeavierTipset ([filecoin-project/lotus#10839](https://github.com/filecoin-project/lotus/pull/10839))
+ - fix: itest: fix eth deploy test flake ([filecoin-project/lotus#10829](https://github.com/filecoin-project/lotus/pull/10829))
+ - style: mempool: chain errors using xerrors.Errorf ([filecoin-project/lotus#10836](https://github.com/filecoin-project/lotus/pull/10836))
+ - feat: deflake msgindex_test.go ([filecoin-project/lotus#10826](https://github.com/filecoin-project/lotus/pull/10826))
+ - feat: deflake TestEthFeeHistory ([filecoin-project/lotus#10816](https://github.com/filecoin-project/lotus/pull/10816))
+ - feat: make RunClientTest louder when deals fail ([filecoin-project/lotus#10817](https://github.com/filecoin-project/lotus/pull/10817))
+ - fix: cli: Change arg wording in change-beneficiary cmd ([filecoin-project/lotus#10823](https://github.com/filecoin-project/lotus/pull/10823))
+ - refactor: streamline error handling in CheckPendingMessages (#10818) ([filecoin-project/lotus#10818](https://github.com/filecoin-project/lotus/pull/10818))
+ - feat: Add tmp indices to events table while performing migration to V2
# v1.23.2 / 2023-06-28
diff --git a/Dockerfile b/Dockerfile
index 596fb4528..c9750a71f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,5 @@
#####################################
-FROM golang:1.19.7-buster AS lotus-builder
+FROM golang:1.20.7-bullseye AS lotus-builder
MAINTAINER Lotus Development Team
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
@@ -58,7 +58,7 @@ COPY --from=lotus-builder /lib/*/libgcc_s.so.1 /lib/
COPY --from=lotus-builder /lib/*/libutil.so.1 /lib/
COPY --from=lotus-builder /usr/lib/*/libltdl.so.7 /lib/
COPY --from=lotus-builder /usr/lib/*/libnuma.so.1 /lib/
-COPY --from=lotus-builder /usr/lib/*/libhwloc.so.5 /lib/
+COPY --from=lotus-builder /usr/lib/*/libhwloc.so.* /lib/
COPY --from=lotus-builder /usr/lib/*/libOpenCL.so.1 /lib/
RUN useradd -r -u 532 -U fc \
diff --git a/GO_VERSION_MIN b/GO_VERSION_MIN
index 98adfe8e1..8909929f6 100644
--- a/GO_VERSION_MIN
+++ b/GO_VERSION_MIN
@@ -1 +1 @@
-1.19.7
+1.20.7
diff --git a/Makefile b/Makefile
index 25b2b04c5..f08a054b6 100644
--- a/Makefile
+++ b/Makefile
@@ -203,7 +203,7 @@ lotus-health:
.PHONY: lotus-health
BINS+=lotus-health
-lotus-wallet:
+lotus-wallet: $(BUILD_DEPS)
rm -f lotus-wallet
$(GOCC) build $(GOFLAGS) -o lotus-wallet ./cmd/lotus-wallet
.PHONY: lotus-wallet
@@ -318,7 +318,8 @@ actors-code-gen:
$(GOCC) run ./chain/actors/agen
$(GOCC) fmt ./...
-actors-gen: actors-code-gen fiximports
+actors-gen: actors-code-gen
+ ./scripts/fiximports
.PHONY: actors-gen
bundle-gen:
@@ -378,7 +379,8 @@ docsgen-openrpc-gateway: docsgen-openrpc-bin
fiximports:
./scripts/fiximports
-gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci fiximports
+gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci
+ ./scripts/fiximports
@echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO RUN 'make docsgen-cli'"
.PHONY: gen
diff --git a/README.md b/README.md
index b67cb952f..f6ac75932 100644
--- a/README.md
+++ b/README.md
@@ -71,10 +71,10 @@ For other distributions you can find the required dependencies [here.](https://l
#### Go
-To build Lotus, you need a working installation of [Go 1.19.7 or higher](https://golang.org/dl/):
+To build Lotus, you need a working installation of [Go 1.19.12 or higher](https://golang.org/dl/):
```bash
-wget -c https://golang.org/dl/go1.19.7.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
+wget -c https://golang.org/dl/go1.19.12.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
```
**TIP:**
diff --git a/api/api_full.go b/api/api_full.go
index 591799b48..55dcc23df 100644
--- a/api/api_full.go
+++ b/api/api_full.go
@@ -641,6 +641,11 @@ type FullNode interface {
// StateGetRandomnessFromBeacon is used to sample the beacon for randomness.
StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
+ // StateGetRandomnessDigestFromTickets is used to sample the chain for randomness.
+ StateGetRandomnessDigestFromTickets(ctx context.Context, randEpoch abi.ChainEpoch, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
+ // StateGetRandomnessDigestFromBeacon is used to sample the beacon for randomness.
+ StateGetRandomnessDigestFromBeacon(ctx context.Context, randEpoch abi.ChainEpoch, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
+
// StateGetBeaconEntry returns the beacon entry for the given filecoin epoch. If
// the entry has not yet been produced, the call will block until the entry
// becomes available
@@ -863,6 +868,13 @@ type FullNode interface {
// Returns the client version
Web3ClientVersion(ctx context.Context) (string, error) //perm:read
+ // TraceAPI related methods
+ //
+ // Returns traces created at given block
+ EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) //perm:read
+ // Replays all transactions in a block returning the requested traces for each transaction
+ EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) //perm:read
+
// CreateBackup creates node backup onder the specified file name. The
// method requires that the lotus daemon is running with the
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
diff --git a/api/api_gateway.go b/api/api_gateway.go
index f6740e1e0..08199564d 100644
--- a/api/api_gateway.go
+++ b/api/api_gateway.go
@@ -10,6 +10,7 @@ import (
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
+ verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/dline"
apitypes "github.com/filecoin-project/lotus/api/types"
@@ -65,6 +66,11 @@ type Gateway interface {
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
+ StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+ StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+ StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error)
+ StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error)
+ StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error)
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error)
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
@@ -121,4 +127,6 @@ type Gateway interface {
EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error)
EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error)
Web3ClientVersion(ctx context.Context) (string, error)
+ EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error)
+ EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error)
}
diff --git a/api/eth_aliases.go b/api/eth_aliases.go
index ca0f861ac..eb0c51005 100644
--- a/api/eth_aliases.go
+++ b/api/eth_aliases.go
@@ -40,6 +40,9 @@ func CreateEthRPCAliases(as apitypes.Aliaser) {
as.AliasMethod("eth_subscribe", "Filecoin.EthSubscribe")
as.AliasMethod("eth_unsubscribe", "Filecoin.EthUnsubscribe")
+ as.AliasMethod("trace_block", "Filecoin.EthTraceBlock")
+ as.AliasMethod("trace_replayBlockTransactions", "Filecoin.EthTraceReplayBlockTransactions")
+
as.AliasMethod("net_version", "Filecoin.NetVersion")
as.AliasMethod("net_listening", "Filecoin.NetListening")
diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go
index a1e9c1230..856d83813 100644
--- a/api/mocks/mock_full.go
+++ b/api/mocks/mock_full.go
@@ -1491,6 +1491,36 @@ func (mr *MockFullNodeMockRecorder) EthSyncing(arg0 interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthSyncing", reflect.TypeOf((*MockFullNode)(nil).EthSyncing), arg0)
}
+// EthTraceBlock mocks base method.
+func (m *MockFullNode) EthTraceBlock(arg0 context.Context, arg1 string) ([]*ethtypes.EthTraceBlock, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "EthTraceBlock", arg0, arg1)
+ ret0, _ := ret[0].([]*ethtypes.EthTraceBlock)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// EthTraceBlock indicates an expected call of EthTraceBlock.
+func (mr *MockFullNodeMockRecorder) EthTraceBlock(arg0, arg1 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthTraceBlock", reflect.TypeOf((*MockFullNode)(nil).EthTraceBlock), arg0, arg1)
+}
+
+// EthTraceReplayBlockTransactions mocks base method.
+func (m *MockFullNode) EthTraceReplayBlockTransactions(arg0 context.Context, arg1 string, arg2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "EthTraceReplayBlockTransactions", arg0, arg1, arg2)
+ ret0, _ := ret[0].([]*ethtypes.EthTraceReplayBlockTransaction)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// EthTraceReplayBlockTransactions indicates an expected call of EthTraceReplayBlockTransactions.
+func (mr *MockFullNodeMockRecorder) EthTraceReplayBlockTransactions(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthTraceReplayBlockTransactions", reflect.TypeOf((*MockFullNode)(nil).EthTraceReplayBlockTransactions), arg0, arg1, arg2)
+}
+
// EthUninstallFilter mocks base method.
func (m *MockFullNode) EthUninstallFilter(arg0 context.Context, arg1 ethtypes.EthFilterID) (bool, error) {
m.ctrl.T.Helper()
@@ -3263,6 +3293,36 @@ func (mr *MockFullNodeMockRecorder) StateGetNetworkParams(arg0 interface{}) *gom
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetNetworkParams", reflect.TypeOf((*MockFullNode)(nil).StateGetNetworkParams), arg0)
}
+// StateGetRandomnessDigestFromBeacon mocks base method.
+func (m *MockFullNode) StateGetRandomnessDigestFromBeacon(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) (abi.Randomness, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateGetRandomnessDigestFromBeacon", arg0, arg1, arg2)
+ ret0, _ := ret[0].(abi.Randomness)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateGetRandomnessDigestFromBeacon indicates an expected call of StateGetRandomnessDigestFromBeacon.
+func (mr *MockFullNodeMockRecorder) StateGetRandomnessDigestFromBeacon(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetRandomnessDigestFromBeacon", reflect.TypeOf((*MockFullNode)(nil).StateGetRandomnessDigestFromBeacon), arg0, arg1, arg2)
+}
+
+// StateGetRandomnessDigestFromTickets mocks base method.
+func (m *MockFullNode) StateGetRandomnessDigestFromTickets(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) (abi.Randomness, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "StateGetRandomnessDigestFromTickets", arg0, arg1, arg2)
+ ret0, _ := ret[0].(abi.Randomness)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// StateGetRandomnessDigestFromTickets indicates an expected call of StateGetRandomnessDigestFromTickets.
+func (mr *MockFullNodeMockRecorder) StateGetRandomnessDigestFromTickets(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetRandomnessDigestFromTickets", reflect.TypeOf((*MockFullNode)(nil).StateGetRandomnessDigestFromTickets), arg0, arg1, arg2)
+}
+
// StateGetRandomnessFromBeacon mocks base method.
func (m *MockFullNode) StateGetRandomnessFromBeacon(arg0 context.Context, arg1 crypto.DomainSeparationTag, arg2 abi.ChainEpoch, arg3 []byte, arg4 types.TipSetKey) (abi.Randomness, error) {
m.ctrl.T.Helper()
diff --git a/api/proxy_gen.go b/api/proxy_gen.go
index ce4ec3d1e..1082e8f4c 100644
--- a/api/proxy_gen.go
+++ b/api/proxy_gen.go
@@ -316,6 +316,10 @@ type FullNodeMethods struct {
EthSyncing func(p0 context.Context) (ethtypes.EthSyncingResult, error) `perm:"read"`
+ EthTraceBlock func(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) `perm:"read"`
+
+ EthTraceReplayBlockTransactions func(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) `perm:"read"`
+
EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) `perm:"read"`
EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) `perm:"read"`
@@ -494,6 +498,10 @@ type FullNodeMethods struct {
StateGetNetworkParams func(p0 context.Context) (*NetworkParams, error) `perm:"read"`
+ StateGetRandomnessDigestFromBeacon func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
+
+ StateGetRandomnessDigestFromTickets func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
+
StateGetRandomnessFromBeacon func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
StateGetRandomnessFromTickets func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
@@ -728,6 +736,10 @@ type GatewayMethods struct {
EthSyncing func(p0 context.Context) (ethtypes.EthSyncingResult, error) ``
+ EthTraceBlock func(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) ``
+
+ EthTraceReplayBlockTransactions func(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) ``
+
EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) ``
EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) ``
@@ -766,6 +778,16 @@ type GatewayMethods struct {
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) ``
+ StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) ``
+
+ StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) ``
+
+ StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) ``
+
+ StateGetClaim func(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) ``
+
+ StateGetClaims func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) ``
+
StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) ``
StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``
@@ -2443,6 +2465,28 @@ func (s *FullNodeStub) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult
return *new(ethtypes.EthSyncingResult), ErrNotSupported
}
+func (s *FullNodeStruct) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
+ if s.Internal.EthTraceBlock == nil {
+ return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
+ }
+ return s.Internal.EthTraceBlock(p0, p1)
+}
+
+func (s *FullNodeStub) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
+ return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
+}
+
+func (s *FullNodeStruct) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ if s.Internal.EthTraceReplayBlockTransactions == nil {
+ return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
+ }
+ return s.Internal.EthTraceReplayBlockTransactions(p0, p1, p2)
+}
+
+func (s *FullNodeStub) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
+}
+
func (s *FullNodeStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) {
if s.Internal.EthUninstallFilter == nil {
return false, ErrNotSupported
@@ -3422,6 +3466,28 @@ func (s *FullNodeStub) StateGetNetworkParams(p0 context.Context) (*NetworkParams
return nil, ErrNotSupported
}
+func (s *FullNodeStruct) StateGetRandomnessDigestFromBeacon(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (abi.Randomness, error) {
+ if s.Internal.StateGetRandomnessDigestFromBeacon == nil {
+ return *new(abi.Randomness), ErrNotSupported
+ }
+ return s.Internal.StateGetRandomnessDigestFromBeacon(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateGetRandomnessDigestFromBeacon(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (abi.Randomness, error) {
+ return *new(abi.Randomness), ErrNotSupported
+}
+
+func (s *FullNodeStruct) StateGetRandomnessDigestFromTickets(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (abi.Randomness, error) {
+ if s.Internal.StateGetRandomnessDigestFromTickets == nil {
+ return *new(abi.Randomness), ErrNotSupported
+ }
+ return s.Internal.StateGetRandomnessDigestFromTickets(p0, p1, p2)
+}
+
+func (s *FullNodeStub) StateGetRandomnessDigestFromTickets(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (abi.Randomness, error) {
+ return *new(abi.Randomness), ErrNotSupported
+}
+
func (s *FullNodeStruct) StateGetRandomnessFromBeacon(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) {
if s.Internal.StateGetRandomnessFromBeacon == nil {
return *new(abi.Randomness), ErrNotSupported
@@ -4643,6 +4709,28 @@ func (s *GatewayStub) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult,
return *new(ethtypes.EthSyncingResult), ErrNotSupported
}
+func (s *GatewayStruct) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
+ if s.Internal.EthTraceBlock == nil {
+ return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
+ }
+ return s.Internal.EthTraceBlock(p0, p1)
+}
+
+func (s *GatewayStub) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
+ return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
+}
+
+func (s *GatewayStruct) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ if s.Internal.EthTraceReplayBlockTransactions == nil {
+ return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
+ }
+ return s.Internal.EthTraceReplayBlockTransactions(p0, p1, p2)
+}
+
+func (s *GatewayStub) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
+}
+
func (s *GatewayStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) {
if s.Internal.EthUninstallFilter == nil {
return false, ErrNotSupported
@@ -4852,6 +4940,61 @@ func (s *GatewayStub) StateGetActor(p0 context.Context, p1 address.Address, p2 t
return nil, ErrNotSupported
}
+func (s *GatewayStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ if s.Internal.StateGetAllocation == nil {
+ return nil, ErrNotSupported
+ }
+ return s.Internal.StateGetAllocation(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ if s.Internal.StateGetAllocationForPendingDeal == nil {
+ return nil, ErrNotSupported
+ }
+ return s.Internal.StateGetAllocationForPendingDeal(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
+ if s.Internal.StateGetAllocations == nil {
+ return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
+ }
+ return s.Internal.StateGetAllocations(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
+ return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
+ if s.Internal.StateGetClaim == nil {
+ return nil, ErrNotSupported
+ }
+ return s.Internal.StateGetClaim(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
+ return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
+ if s.Internal.StateGetClaims == nil {
+ return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
+ }
+ return s.Internal.StateGetClaims(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
+ return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
+}
+
func (s *GatewayStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
if s.Internal.StateListMiners == nil {
return *new([]address.Address), ErrNotSupported
diff --git a/api/types.go b/api/types.go
index 8db5120a8..96f9fa63d 100644
--- a/api/types.go
+++ b/api/types.go
@@ -312,6 +312,7 @@ type NetworkParams struct {
SupportedProofTypes []abi.RegisteredSealProof
PreCommitChallengeDelay abi.ChainEpoch
ForkUpgradeParams ForkUpgradeParams
+ Eip155ChainID int
}
type ForkUpgradeParams struct {
@@ -339,6 +340,7 @@ type ForkUpgradeParams struct {
UpgradeHyggeHeight abi.ChainEpoch
UpgradeLightningHeight abi.ChainEpoch
UpgradeThunderHeight abi.ChainEpoch
+ UpgradeWatermelonHeight abi.ChainEpoch
}
type NonceMapType map[address.Address]uint64
diff --git a/api/v0api/gateway.go b/api/v0api/gateway.go
index 9f6c54fa9..df9153b8d 100644
--- a/api/v0api/gateway.go
+++ b/api/v0api/gateway.go
@@ -9,6 +9,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
+ verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/dline"
abinetwork "github.com/filecoin-project/go-state-types/network"
@@ -61,6 +62,11 @@ type Gateway interface {
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
+ StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+ StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+ StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error)
+ StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error)
+ StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error)
StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go
index 29f6f6773..22faa3acc 100644
--- a/api/v0api/proxy_gen.go
+++ b/api/v0api/proxy_gen.go
@@ -479,6 +479,16 @@ type GatewayMethods struct {
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) ``
+ StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) ``
+
+ StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) ``
+
+ StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) ``
+
+ StateGetClaim func(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) ``
+
+ StateGetClaims func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) ``
+
StateGetReceipt func(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) ``
StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) ``
@@ -2851,6 +2861,61 @@ func (s *GatewayStub) StateGetActor(p0 context.Context, p1 address.Address, p2 t
return nil, ErrNotSupported
}
+func (s *GatewayStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ if s.Internal.StateGetAllocation == nil {
+ return nil, ErrNotSupported
+ }
+ return s.Internal.StateGetAllocation(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ if s.Internal.StateGetAllocationForPendingDeal == nil {
+ return nil, ErrNotSupported
+ }
+ return s.Internal.StateGetAllocationForPendingDeal(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
+ return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
+ if s.Internal.StateGetAllocations == nil {
+ return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
+ }
+ return s.Internal.StateGetAllocations(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
+ return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
+ if s.Internal.StateGetClaim == nil {
+ return nil, ErrNotSupported
+ }
+ return s.Internal.StateGetClaim(p0, p1, p2, p3)
+}
+
+func (s *GatewayStub) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
+ return nil, ErrNotSupported
+}
+
+func (s *GatewayStruct) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
+ if s.Internal.StateGetClaims == nil {
+ return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
+ }
+ return s.Internal.StateGetClaims(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
+ return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
+}
+
func (s *GatewayStruct) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) {
if s.Internal.StateGetReceipt == nil {
return nil, ErrNotSupported
diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go
index 1f1ba0e99..c1a95c8b0 100644
--- a/blockstore/splitstore/splitstore.go
+++ b/blockstore/splitstore/splitstore.go
@@ -182,7 +182,6 @@ type SplitStore struct {
compactionIndex int64
pruneIndex int64
- onlineGCCnt int64
ctx context.Context
cancel func()
diff --git a/build/actors/v12.tar.zst b/build/actors/v12.tar.zst
new file mode 100644
index 000000000..def521bd7
Binary files /dev/null and b/build/actors/v12.tar.zst differ
diff --git a/build/builtin_actors_gen.go b/build/builtin_actors_gen.go
index 13f595b6e..3eed5bcf7 100644
--- a/build/builtin_actors_gen.go
+++ b/build/builtin_actors_gen.go
@@ -71,9 +71,32 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"system": MustParseCid("bafk2bzacebojf25kc5yo7gskdbdgg5f52oppej2jp6nknzlvrww4ue5vkddd2"),
"verifiedregistry": MustParseCid("bafk2bzaceavue3zekq4wmvttck2vgxlcensrsgh5niu5qhna2owejycorftcc"),
},
+}, {
+ Network: "butterflynet",
+ Version: 11,
+
+ ManifestCid: MustParseCid("bafy2bzaceaiy4dsxxus5xp5n5i4tjzkb7sc54mjz7qnk2efhgmsrobjesxnza"),
+ Actors: map[string]cid.Cid{
+ "account": MustParseCid("bafk2bzacecfdqb7p3jakhaa3cqnzpt7hxmhghrbxvafsylqno3febx55fnidw"),
+ "cron": MustParseCid("bafk2bzaceavmqu2qihgbe3xdaotgypuzvdpiifnm7ll6rolks2u4lac6voosk"),
+ "datacap": MustParseCid("bafk2bzacealtvh65rzb34fmyzw4m2np2htnio4w3pn4alzqovwxkdbf23dvpo"),
+ "eam": MustParseCid("bafk2bzacedko6hcjmwpuwgma5pb4gr2wgyvregk3nqqjxit7dv4es6vh5cjoc"),
+ "ethaccount": MustParseCid("bafk2bzacedhcei2xnr34poxr4xziypm2obqlibke4cs2cjfnr3sz6nf6h7fyy"),
+ "evm": MustParseCid("bafk2bzacebn5lwxboiikhz67ajwa34v2lc4qevnhpwdnipbmrnutkvrrqkb46"),
+ "init": MustParseCid("bafk2bzacea6vw4esh5tg7mprv5jkbx5xcyilcy4vvf64lss32mjyuvv2mh5ng"),
+ "multisig": MustParseCid("bafk2bzacedq2afnwcfipay5twv5mgzjoio5bbjvyo4yqchdwqcr7wrareyx54"),
+ "paymentchannel": MustParseCid("bafk2bzacebbsvr7i7mqmaadyjibe5wxnv7bwvvec2wlgknuwda6ep45amnd5w"),
+ "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
+ "reward": MustParseCid("bafk2bzaceafuh6idvaqqkj353vs4qdl42tcmvnymewu5zf4rq2nruxdyunses"),
+ "storagemarket": MustParseCid("bafk2bzaceb7bx4honi3byjllpdk6fea32dpu3vqvil3okodybdk5m3erlnwjw"),
+ "storageminer": MustParseCid("bafk2bzacebxjhofdr3sb2uhy2ky2vcijh4nhmwkh5xijtbgk6dzkknji2kn7a"),
+ "storagepower": MustParseCid("bafk2bzaceabskmmkas6njbowols7t4ib3bipa5abpomk3jtgfwojtzd7mjzfm"),
+ "system": MustParseCid("bafk2bzacedtuh7cht3fud7fb4avl4g2zbz57lc4ohiaufpaex6dkmdokn5rgo"),
+ "verifiedregistry": MustParseCid("bafk2bzaceb37hxeuoo5rgf6ansrdl2ykm5v5zp6kireubn4orcopr67jbxv6k"),
+ },
}, {
Network: "butterflynet",
- Version: 11,
+ Version: 12,
BundleGitTag: "v11.0.0",
ManifestCid: MustParseCid("bafy2bzaceaiy4dsxxus5xp5n5i4tjzkb7sc54mjz7qnk2efhgmsrobjesxnza"),
Actors: map[string]cid.Cid{
@@ -155,10 +178,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"verifiedregistry": MustParseCid("bafk2bzacec67wuchq64k7kgrujguukjvdlsl24pgighqdx5vgjhyk6bycrwnc"),
},
}, {
- Network: "calibrationnet",
- Version: 11,
- BundleGitTag: "v11.0.0-rc2",
- ManifestCid: MustParseCid("bafy2bzacedhuowetjy2h4cxnijz2l64h4mzpk5m256oywp4evarpono3cjhco"),
+ Network: "calibrationnet",
+ Version: 11,
+
+ ManifestCid: MustParseCid("bafy2bzacedhuowetjy2h4cxnijz2l64h4mzpk5m256oywp4evarpono3cjhco"),
Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzacebor5mnjnsav34cmm5pcd3dy4wubbv4wtcrvba7depy3sct7ie4sy"),
"cron": MustParseCid("bafk2bzacebetehhedh55alfn4rcx2mhjhvuiustxlhtxc3drkemnpttws5eqw"),
@@ -177,6 +200,29 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"system": MustParseCid("bafk2bzacedqvik2n3phnj3cni3h2k5mtvz43nyq7mdmv7k7euejysvajywdug"),
"verifiedregistry": MustParseCid("bafk2bzaceceoo5jlom2zweh7kpye2vkj33wgqnkjshlsw2neemqkfg5g2rmvg"),
},
+}, {
+ Network: "calibrationnet",
+ Version: 12,
+ BundleGitTag: "v11.0.0",
+ ManifestCid: MustParseCid("bafy2bzacec5fl7crmxyw234qsmijvffhssgqwuphyaejulbryhel2pxxrxgey"),
+ Actors: map[string]cid.Cid{
+ "account": MustParseCid("bafk2bzacecrjovyiuh4jryepy4pxddzqjyrg2hfinxzbv37bpzlci54r5mkr6"),
+ "cron": MustParseCid("bafk2bzacedy76woxmtalmsuaji4kog6wmq4h2kcgcyv5wpxbdz7f2ls2tjjmw"),
+ "datacap": MustParseCid("bafk2bzacec2inqddxpfm3rufwqr752d2ok3ve4cxfhmloyosy6rj2krtkpwus"),
+ "eam": MustParseCid("bafk2bzacea6sxno66egkqz5rqjq4e22obkeblxl7e3funjifljuinmrc2ztzg"),
+ "ethaccount": MustParseCid("bafk2bzacecdsvs7xm3ncm66lsjqh65uzhr3rmu3dlux7qzdgpg737r4kslhxm"),
+ "evm": MustParseCid("bafk2bzaceaz3b66m2znt27clmbp2zi5jsobw6g2x6fiezynyijgtkehgqhq3a"),
+ "init": MustParseCid("bafk2bzacecdrw7uedx456hnowtyyhm63mkekdlkh3vmlhvqlya6pn6pokiq5y"),
+ "multisig": MustParseCid("bafk2bzaceaxyxvmng5cel5huzy5nezscm34s7wuzn2fhndexurs3xjtp7xg5i"),
+ "paymentchannel": MustParseCid("bafk2bzacedrmyc4c6u6ipdo7hwaagx3urr47r4pw6lwv257wqbj6roumwfvky"),
+ "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
+ "reward": MustParseCid("bafk2bzacecq3bhrkatwash5zhy2275ksaj3criqb6rox5e3hsyvz7mrl2jh3o"),
+ "storagemarket": MustParseCid("bafk2bzacedswrpkbh7jmttskllbblym7oj2ynxp7bxtj2fpbxsx55mraog6sc"),
+ "storageminer": MustParseCid("bafk2bzacecki6ckm7gf4uje3fxvw6x5f77ukaqctvcsfha6oaecvl67veh3sg"),
+ "storagepower": MustParseCid("bafk2bzacecjcvxwibkgpufeah33gfd2jzlqjx5rn2pguvvch2squon23u6kne"),
+ "system": MustParseCid("bafk2bzaceavvlgqbcwhy3c24aa24z23wcbzggmb66gj7x5if7o3fbvddaocc4"),
+ "verifiedregistry": MustParseCid("bafk2bzacedmxdtnpy2mc63b6bi2h4vp4dfc6hxjckqnwaxyijgkpmangygcmk"),
+ },
}, {
Network: "caterpillarnet",
Version: 8,
@@ -246,9 +292,32 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"system": MustParseCid("bafk2bzacecfivztuulqqv4o5oyvvvrkblwix4hqt24pqru6ivnpioefhuhria"),
"verifiedregistry": MustParseCid("bafk2bzacecdhw6x7dfrxfysmn6tdbn2ny464omgqppxhjuawxauscidppd7pc"),
},
+}, {
+ Network: "caterpillarnet",
+ Version: 11,
+
+ ManifestCid: MustParseCid("bafy2bzacebexc2jgzwr5ngn6jdnkwdqwwmcapajuypdgvopoe6bnvp4yxm4o2"),
+ Actors: map[string]cid.Cid{
+ "account": MustParseCid("bafk2bzaceanjiq5m3feytue5m7hhxfkob2ofg2greoct5tr77reuhrjglo66g"),
+ "cron": MustParseCid("bafk2bzaceavgd5qj6n744tukhdrvxejygzs3jnlizmcvjsdnxkgiimrd5jrys"),
+ "datacap": MustParseCid("bafk2bzacedmdywxwrzop2gmf4ys5stydlmvbe35j3nyr2efmf273briksuvse"),
+ "eam": MustParseCid("bafk2bzacec7qo7s72li7tqysllstlrxxm2dhfqv2w32pytel2e775cki4ozqm"),
+ "ethaccount": MustParseCid("bafk2bzaceaygtkliu26ubb7ivljrvaeesp5sbjlis5okzl35ishxioa2tlx4w"),
+ "evm": MustParseCid("bafk2bzacebo7iqzy2ophz4f3civzwlltec7q5fut7kmtfckr6vy33r6ic5eqe"),
+ "init": MustParseCid("bafk2bzaceb7uzzlsquqwrqhb2vpbvk3jgr4wp5i3smu2splnag2v5sppdehns"),
+ "multisig": MustParseCid("bafk2bzacebwibfqrytobl4pjtny244zkmfoomazbap3r5gddjryckx5js4csi"),
+ "paymentchannel": MustParseCid("bafk2bzacecuaa5esuxpouigxoamyl5gire2qqqhvyhewsig2x2j73f6ksh7go"),
+ "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
+ "reward": MustParseCid("bafk2bzaced4xxqhv63njf2ibvsqshlwikafctxev7aho5lgsfxyt2javjwvtw"),
+ "storagemarket": MustParseCid("bafk2bzacedwtx3xokqmbgkgkoqkdt6lam4ymdjb3eznlbtec5wcrtx74l2bpc"),
+ "storageminer": MustParseCid("bafk2bzacebbbe4sdo3xxkez7x7lkl6j46w34vx7eg7xswmdzhp7moa44p3wjg"),
+ "storagepower": MustParseCid("bafk2bzacedfgz6n24tjsor4pcayomim2f5f3a3fgyatmjgwxxeejna7okndda"),
+ "system": MustParseCid("bafk2bzacebxfzeom3d7ahcz2n2nlwp7ncv767bdbbrisugks4l6v7lcu2tmyg"),
+ "verifiedregistry": MustParseCid("bafk2bzacedaws3or3twy45ltcxucgvqijsje4x675ph6vup2w35smlfneamno"),
+ },
}, {
Network: "caterpillarnet",
- Version: 11,
+ Version: 12,
BundleGitTag: "v11.0.0",
ManifestCid: MustParseCid("bafy2bzacebexc2jgzwr5ngn6jdnkwdqwwmcapajuypdgvopoe6bnvp4yxm4o2"),
Actors: map[string]cid.Cid{
@@ -329,9 +398,32 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"system": MustParseCid("bafk2bzaceairk5qz5hyzt4yyaxa356aszyifswiust5ilxizwxujcmtzvjzoa"),
"verifiedregistry": MustParseCid("bafk2bzaced2mkyqobpgna5jevosym3adv2bvraggigyz2jgn5cxymirxj4x3i"),
},
+}, {
+ Network: "devnet",
+ Version: 11,
+
+ ManifestCid: MustParseCid("bafy2bzaceay35go4xbjb45km6o46e5bib3bi46panhovcbedrynzwmm3drr4i"),
+ Actors: map[string]cid.Cid{
+ "account": MustParseCid("bafk2bzacecf2pprkbdlpm4e2xz3ufunxtgrgyh2ie3stuqiyhibsvdze7kvri"),
+ "cron": MustParseCid("bafk2bzaceasr5d2skowvzv5mzsyak6waqrgc46ewj6rzbapkfi5woom6n6bwa"),
+ "datacap": MustParseCid("bafk2bzaceaqd77gptubupda7rp7daxkxbkzwc253dxhiyoezxvj2tljmkgpny"),
+ "eam": MustParseCid("bafk2bzacedve6p4ye6zxydjbfs4ode5r2equ7rqzpyltujsq2lu6wyxnijfx4"),
+ "ethaccount": MustParseCid("bafk2bzacea25xfsxwew3h2crer6jlb4c5vwu2gtch2jh73ocuxjhupenyrugy"),
+ "evm": MustParseCid("bafk2bzacece5hivtkmi757lyfahgti7xuqgofodb2u65pxgf6oizfwiiwlcsi"),
+ "init": MustParseCid("bafk2bzacecxnr5y7qifzdqqiwfbjxv2yr7lbkcyu3e2mf5zjdncteupxdlquu"),
+ "multisig": MustParseCid("bafk2bzaceayap4k4u3lbysaeeixct5fvhmafy3fa5eagvdpk3i4a7ubfdpobe"),
+ "paymentchannel": MustParseCid("bafk2bzaceafgrz5wepbein35gie7rnsu7zttxvgllgdneuefmmy4j5izydtza"),
+ "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
+ "reward": MustParseCid("bafk2bzacedwbtfqlx47fdkxjrb5mwiatheci44x3zkpx33smybc2cme23ymuo"),
+ "storagemarket": MustParseCid("bafk2bzaceaj74fmooaf3gj3ebwon64ky7hhdh7kytdr3agclqfrqzmpzykh7g"),
+ "storageminer": MustParseCid("bafk2bzacedb7bokkzzs7hnbhivp74pgcpermuy7j6b3ncodylksukkxtnn7ze"),
+ "storagepower": MustParseCid("bafk2bzacedilnkegizkxz3nuutib4d4wwlk4bkla22loepia2h53yf4hysmq6"),
+ "system": MustParseCid("bafk2bzacedpyoncjbl4oxkjm5e77ngvpy2xfajjc4myfsv2vltvzxioattlu2"),
+ "verifiedregistry": MustParseCid("bafk2bzacebdqi5tr5pjnem5nylg2zbqcugvi7oxi35bhnrfudx4y4ufhlit2k"),
+ },
}, {
Network: "devnet",
- Version: 11,
+ Version: 12,
BundleGitTag: "v11.0.0",
ManifestCid: MustParseCid("bafy2bzaceay35go4xbjb45km6o46e5bib3bi46panhovcbedrynzwmm3drr4i"),
Actors: map[string]cid.Cid{
@@ -435,9 +527,32 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"system": MustParseCid("bafk2bzacedakk5nofebyup4m7nvx6djksfwhnxzrfuq4oyemhpl4lllaikr64"),
"verifiedregistry": MustParseCid("bafk2bzacedfel6edzqpe5oujno7fog4i526go4dtcs6vwrdtbpy2xq6htvcg6"),
},
+}, {
+ Network: "mainnet",
+ Version: 11,
+
+ ManifestCid: MustParseCid("bafy2bzacecnhaiwcrpyjvzl4uv4q3jzoif26okl3m66q3cijp3dfwlcxwztwo"),
+ Actors: map[string]cid.Cid{
+ "account": MustParseCid("bafk2bzacealnlr7st6lkwoh6wxpf2hnrlex5sknaopgmkr2tuhg7vmbfy45so"),
+ "cron": MustParseCid("bafk2bzacebpewdvvgt6tk2o2u4rcovdgym67tadiis5usemlbejg7k3kt567o"),
+ "datacap": MustParseCid("bafk2bzacebslykoyrb2hm7aacjngqgd5n2wmeii2goadrs5zaya3pvdf6pdnq"),
+ "eam": MustParseCid("bafk2bzaceaelwt4yfsfvsu3pa3miwalsvy3cfkcjvmt4sqoeopsppnrmj2mf2"),
+ "ethaccount": MustParseCid("bafk2bzaceclkmc4yidxc6lgcjpfypbde2eddnevcveo4j5kmh4ek6inqysz2k"),
+ "evm": MustParseCid("bafk2bzacediwh6etwzwmb5pivtclpdplewdjzphouwqpppce6opisjv2fjqfe"),
+ "init": MustParseCid("bafk2bzaceckwf3w6n2nw6eh77ktmsxqgsvshonvgnyk5q5syyngtetxvasfxg"),
+ "multisig": MustParseCid("bafk2bzaceafajceqwg5ybiz7xw6rxammuirkgtuv625gzaehsqfprm4bazjmk"),
+ "paymentchannel": MustParseCid("bafk2bzaceb4e6cnsnviegmqvsmoxzncruvhra54piq7bwiqfqevle6oob2gvo"),
+ "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
+ "reward": MustParseCid("bafk2bzacebwjw2vxkobs7r2kwjdqqb42h2kucyuk6flbnyzw4odg5s4mogamo"),
+ "storagemarket": MustParseCid("bafk2bzaceazu2j2zu4p24tr22btnqzkhzjvyjltlvsagaj6w3syevikeb5d7m"),
+ "storageminer": MustParseCid("bafk2bzacec24okjqrp7c7rj3hbrs5ez5apvwah2ruka6haesgfngf37mhk6us"),
+ "storagepower": MustParseCid("bafk2bzaceaxgloxuzg35vu7l7tohdgaq2frsfp4ejmuo7tkoxjp5zqrze6sf4"),
+ "system": MustParseCid("bafk2bzaced7npe5mt5nh72jxr2igi2sofoa7gedt4w6kueeke7i3xxugqpjfm"),
+ "verifiedregistry": MustParseCid("bafk2bzacedej3dnr62g2je2abmyjg3xqv4otvh6e26du5fcrhvw7zgcaaez3a"),
+ },
}, {
Network: "mainnet",
- Version: 11,
+ Version: 12,
BundleGitTag: "v11.0.0",
ManifestCid: MustParseCid("bafy2bzacecnhaiwcrpyjvzl4uv4q3jzoif26okl3m66q3cijp3dfwlcxwztwo"),
Actors: map[string]cid.Cid{
@@ -518,9 +633,32 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"system": MustParseCid("bafk2bzacecf2jimdz7knhngs64ximfz3eaud6s3kiunmkybgrkupdjyo2dw7o"),
"verifiedregistry": MustParseCid("bafk2bzacecdmek2htsgcyoyl35glakyab66cojqo2y335njnm7krleb6yfbps"),
},
+}, {
+ Network: "testing",
+ Version: 11,
+
+ ManifestCid: MustParseCid("bafy2bzacea2vxre32tg3xhpejrktiuzx4d3pcoe7yyazgscfibmegmchr6n42"),
+ Actors: map[string]cid.Cid{
+ "account": MustParseCid("bafk2bzaceccerssb3tgel6ukdghlwvs7dxsolj4fpkgn7dh7owzwapqb6ejpw"),
+ "cron": MustParseCid("bafk2bzacebtfl6fczxnitrqqjhyefskf3asyn3gzuvqcddieoqfsaddk5fd4q"),
+ "datacap": MustParseCid("bafk2bzacediikc55y6uzmnhucf4mik6rqpjulwvgp5gdibogxjhgbvusmzff2"),
+ "eam": MustParseCid("bafk2bzaceazqi5ezossp6kvqogaaba6hxlfarqgniktmb7iy5qonha3eflz6m"),
+ "ethaccount": MustParseCid("bafk2bzaceb77ospgfqqmf67v23wkyeg7lr2mu53ybaacu3bslx7s7nhttdueo"),
+ "evm": MustParseCid("bafk2bzacedvgt7mv22hux4vrnklylq7qmw43kfrqwam6wdsfzkdnaewr33qbu"),
+ "init": MustParseCid("bafk2bzacealzb3nk2oypway5ubz3hs5py5ok5tuw545454vg4d3mwbslef4js"),
+ "multisig": MustParseCid("bafk2bzacec45ppn4hrwizmopp2v2atkxw35tb6yem6uqhqilrv7aiaknnnxmu"),
+ "paymentchannel": MustParseCid("bafk2bzaceajbr3t6cngzh3katqteflbcrtwtdgbthnlfemon5tg6rytf2uonw"),
+ "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
+ "reward": MustParseCid("bafk2bzacea7ycf53kbq4robcuh3ziy7qwwhaqamc5krn3lugypgpxhlewdaiq"),
+ "storagemarket": MustParseCid("bafk2bzacedskmbcpaeb6bezs32szh52jrukvihobluadauayroo5gzrt32tkm"),
+ "storageminer": MustParseCid("bafk2bzaced3yg5lctmswnbkxyd6cleg3llyux7fu2vbddyd2ho36fpym423mq"),
+ "storagepower": MustParseCid("bafk2bzacebvpdf372fzxgixztbz2r7ayxyvx7jmdxwlfuqt2cq7tnqgie3klw"),
+ "system": MustParseCid("bafk2bzaceaatvscbnkv36ixhtt2zel4er5oskxevgumh5gegqkv7uzah36f24"),
+ "verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"),
+ },
}, {
Network: "testing",
- Version: 11,
+ Version: 12,
BundleGitTag: "v11.0.0",
ManifestCid: MustParseCid("bafy2bzacea2vxre32tg3xhpejrktiuzx4d3pcoe7yyazgscfibmegmchr6n42"),
Actors: map[string]cid.Cid{
@@ -601,9 +739,32 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"system": MustParseCid("bafk2bzacecf2jimdz7knhngs64ximfz3eaud6s3kiunmkybgrkupdjyo2dw7o"),
"verifiedregistry": MustParseCid("bafk2bzacecdmek2htsgcyoyl35glakyab66cojqo2y335njnm7krleb6yfbps"),
},
+}, {
+ Network: "testing-fake-proofs",
+ Version: 11,
+
+ ManifestCid: MustParseCid("bafy2bzacecojemqglhzzhjnhgtrcbsgkyv67ziytvtbhwlr4ym4oxqofv7zui"),
+ Actors: map[string]cid.Cid{
+ "account": MustParseCid("bafk2bzaceccerssb3tgel6ukdghlwvs7dxsolj4fpkgn7dh7owzwapqb6ejpw"),
+ "cron": MustParseCid("bafk2bzacebtfl6fczxnitrqqjhyefskf3asyn3gzuvqcddieoqfsaddk5fd4q"),
+ "datacap": MustParseCid("bafk2bzacediikc55y6uzmnhucf4mik6rqpjulwvgp5gdibogxjhgbvusmzff2"),
+ "eam": MustParseCid("bafk2bzaceazqi5ezossp6kvqogaaba6hxlfarqgniktmb7iy5qonha3eflz6m"),
+ "ethaccount": MustParseCid("bafk2bzaceb77ospgfqqmf67v23wkyeg7lr2mu53ybaacu3bslx7s7nhttdueo"),
+ "evm": MustParseCid("bafk2bzacedvgt7mv22hux4vrnklylq7qmw43kfrqwam6wdsfzkdnaewr33qbu"),
+ "init": MustParseCid("bafk2bzacealzb3nk2oypway5ubz3hs5py5ok5tuw545454vg4d3mwbslef4js"),
+ "multisig": MustParseCid("bafk2bzacec45ppn4hrwizmopp2v2atkxw35tb6yem6uqhqilrv7aiaknnnxmu"),
+ "paymentchannel": MustParseCid("bafk2bzaceajbr3t6cngzh3katqteflbcrtwtdgbthnlfemon5tg6rytf2uonw"),
+ "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
+ "reward": MustParseCid("bafk2bzacea7ycf53kbq4robcuh3ziy7qwwhaqamc5krn3lugypgpxhlewdaiq"),
+ "storagemarket": MustParseCid("bafk2bzacedskmbcpaeb6bezs32szh52jrukvihobluadauayroo5gzrt32tkm"),
+ "storageminer": MustParseCid("bafk2bzacebqeztpa5exztccqjwqhan5droiy7ga6zekm6f2gzxoe655vneczm"),
+ "storagepower": MustParseCid("bafk2bzaceb2tlyuwxncdxsh3hc4fwcjnpxaijkiv54ustwdjbrqabxdsc27km"),
+ "system": MustParseCid("bafk2bzaceaatvscbnkv36ixhtt2zel4er5oskxevgumh5gegqkv7uzah36f24"),
+ "verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"),
+ },
}, {
Network: "testing-fake-proofs",
- Version: 11,
+ Version: 12,
BundleGitTag: "v11.0.0",
ManifestCid: MustParseCid("bafy2bzacecojemqglhzzhjnhgtrcbsgkyv67ziytvtbhwlr4ym4oxqofv7zui"),
Actors: map[string]cid.Cid{
diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz
index 340f840af..d0d6cc1ce 100644
Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ
diff --git a/build/openrpc/gateway.json.gz b/build/openrpc/gateway.json.gz
index 38108ec94..8860d7071 100644
Binary files a/build/openrpc/gateway.json.gz and b/build/openrpc/gateway.json.gz differ
diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz
index 461aa61a1..3696821da 100644
Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ
diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz
index c8b5a94b0..207ba656e 100644
Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ
diff --git a/build/params_2k.go b/build/params_2k.go
index c3199e2d6..5e0f5c60d 100644
--- a/build/params_2k.go
+++ b/build/params_2k.go
@@ -23,7 +23,7 @@ var NetworkBundle = "devnet"
var BundleOverrides map[actorstypes.Version]string
var ActorDebugging = true
-const GenesisNetworkVersion = network.Version18
+const GenesisNetworkVersion = network.Version20
var UpgradeBreezeHeight = abi.ChainEpoch(-1)
@@ -61,9 +61,11 @@ var UpgradeSharkHeight = abi.ChainEpoch(-20)
var UpgradeHyggeHeight = abi.ChainEpoch(-21)
-var UpgradeLightningHeight = abi.ChainEpoch(30)
+var UpgradeLightningHeight = abi.ChainEpoch(-22)
-var UpgradeThunderHeight = abi.ChainEpoch(1000)
+var UpgradeThunderHeight = abi.ChainEpoch(-23)
+
+var UpgradeWatermelonHeight = abi.ChainEpoch(200)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
@@ -120,6 +122,7 @@ func init() {
UpgradeHyggeHeight = getUpgradeHeight("LOTUS_HYGGE_HEIGHT", UpgradeHyggeHeight)
UpgradeLightningHeight = getUpgradeHeight("LOTUS_LIGHTNING_HEIGHT", UpgradeLightningHeight)
UpgradeThunderHeight = getUpgradeHeight("LOTUS_THUNDER_HEIGHT", UpgradeThunderHeight)
+ UpgradeWatermelonHeight = getUpgradeHeight("LOTUS_WATERMELON_HEIGHT", UpgradeWatermelonHeight)
BuildType |= Build2k
@@ -129,6 +132,8 @@ const BlockDelaySecs = uint64(4)
const PropagationDelaySecs = uint64(1)
+var EquivocationDelaySecs = uint64(0)
+
// SlashablePowerDelay is the number of epochs after ElectionPeriodStart, after
// which the miner is slashed
//
diff --git a/build/params_butterfly.go b/build/params_butterfly.go
index e26fb4ad1..7d3b613c9 100644
--- a/build/params_butterfly.go
+++ b/build/params_butterfly.go
@@ -57,6 +57,9 @@ const UpgradeLightningHeight = 50
const UpgradeThunderHeight = UpgradeLightningHeight + 360
+// ??????????
+const UpgradeWatermelonHeight = 999999999999999
+
var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg512MiBV1,
abi.RegisteredSealProof_StackedDrg32GiBV1,
@@ -83,6 +86,8 @@ const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
const PropagationDelaySecs = uint64(6)
+var EquivocationDelaySecs = uint64(2)
+
// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 2
diff --git a/build/params_calibnet.go b/build/params_calibnet.go
index e8b1c9d7e..4081d4c1d 100644
--- a/build/params_calibnet.go
+++ b/build/params_calibnet.go
@@ -79,6 +79,9 @@ const UpgradeLightningHeight = 489094
// 2023-04-21T16:00:00Z
const UpgradeThunderHeight = UpgradeLightningHeight + 3120
+// ??????????
+const UpgradeWatermelonHeight = 999999999999999
+
var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
@@ -120,6 +123,8 @@ const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
var PropagationDelaySecs = uint64(10)
+var EquivocationDelaySecs = uint64(2)
+
// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 4
diff --git a/build/params_interop.go b/build/params_interop.go
index 04fc777f5..2b2f36160 100644
--- a/build/params_interop.go
+++ b/build/params_interop.go
@@ -52,8 +52,9 @@ var UpgradeSkyrHeight = abi.ChainEpoch(-19)
var UpgradeSharkHeight = abi.ChainEpoch(-20)
var UpgradeHyggeHeight = abi.ChainEpoch(-21)
var UpgradeLightningHeight = abi.ChainEpoch(-22)
+var UpgradeThunderHeight = abi.ChainEpoch(-23)
-const UpgradeThunderHeight = 50
+const UpgradeWatermelonHeight = 50
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
@@ -120,6 +121,8 @@ const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
const PropagationDelaySecs = uint64(6)
+var EquivocationDelaySecs = uint64(2)
+
// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 2
diff --git a/build/params_mainnet.go b/build/params_mainnet.go
index 53eeb2091..f35ac45ef 100644
--- a/build/params_mainnet.go
+++ b/build/params_mainnet.go
@@ -90,10 +90,13 @@ const UpgradeSharkHeight = 2383680
const UpgradeHyggeHeight = 2683348
// 2023-04-27T13:00:00Z
-var UpgradeLightningHeight = abi.ChainEpoch(2809800)
+const UpgradeLightningHeight = 2809800
// 2023-05-18T13:00:00Z
-var UpgradeThunderHeight = UpgradeLightningHeight + 2880*21
+const UpgradeThunderHeight = UpgradeLightningHeight + 2880*21
+
+// ???????
+var UpgradeWatermelonHeight = abi.ChainEpoch(9999999999)
var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg32GiBV1,
@@ -103,17 +106,15 @@ var ConsensusMinerMinPower = abi.NewStoragePower(10 << 40)
var PreCommitChallengeDelay = abi.ChainEpoch(150)
var PropagationDelaySecs = uint64(10)
+var EquivocationDelaySecs = uint64(2)
+
func init() {
if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
SetAddressNetwork(address.Mainnet)
}
- if os.Getenv("LOTUS_DISABLE_LIGHTNING") == "1" {
- UpgradeLightningHeight = math.MaxInt64
- }
-
- if os.Getenv("LOTUS_DISABLE_THUNDER") == "1" {
- UpgradeThunderHeight = math.MaxInt64
+ if os.Getenv("LOTUS_DISABLE_WATERMELON") == "1" {
+ UpgradeWatermelonHeight = math.MaxInt64
}
// NOTE: DO NOT change this unless you REALLY know what you're doing. This is not consensus critical, however,
diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go
index dd7386863..1d15c2fe8 100644
--- a/build/params_shared_vals.go
+++ b/build/params_shared_vals.go
@@ -30,7 +30,7 @@ const AllowableClockDriftSecs = uint64(1)
/* inline-gen template
const TestNetworkVersion = network.Version{{.latestNetworkVersion}}
/* inline-gen start */
-const TestNetworkVersion = network.Version20
+const TestNetworkVersion = network.Version21
/* inline-gen end */
diff --git a/build/params_testground.go b/build/params_testground.go
index 278edd40b..46d2e16de 100644
--- a/build/params_testground.go
+++ b/build/params_testground.go
@@ -9,7 +9,6 @@ package build
import (
"math/big"
- "time"
"github.com/ipfs/go-cid"
@@ -34,6 +33,7 @@ var (
MinimumBaseFee = int64(100)
BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
PropagationDelaySecs = uint64(6)
+ EquivocationDelaySecs = uint64(2)
SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
@@ -109,6 +109,7 @@ var (
UpgradeHyggeHeight abi.ChainEpoch = -20
UpgradeLightningHeight abi.ChainEpoch = -21
UpgradeThunderHeight abi.ChainEpoch = -22
+ UpgradeWatermelonHeight abi.ChainEpoch = -23
DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
@@ -138,7 +139,3 @@ const BootstrapPeerThreshold = 1
// ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint.
// As per https://github.com/ethereum-lists/chains
const Eip155ChainId = 31415926
-
-// Reducing the delivery delay for equivocation of
-// consistent broadcast to just half a second.
-var CBDeliveryDelay = 500 * time.Millisecond
diff --git a/build/version.go b/build/version.go
index 56a2cc0ab..a1c7c9df0 100644
--- a/build/version.go
+++ b/build/version.go
@@ -37,7 +37,7 @@ func BuildTypeString() string {
}
// BuildVersion is the local build version
-const BuildVersion = "1.23.3-dev"
+const BuildVersion = "1.23.5-dev"
func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
diff --git a/chain/actors/builtin/account/account.go b/chain/actors/builtin/account/account.go
index a29248d56..dcb60f801 100644
--- a/chain/actors/builtin/account/account.go
+++ b/chain/actors/builtin/account/account.go
@@ -6,7 +6,7 @@ import (
"github.com/filecoin-project/go-address"
actorstypes "github.com/filecoin-project/go-state-types/actors"
- builtin11 "github.com/filecoin-project/go-state-types/builtin"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@@ -22,7 +22,7 @@ import (
"github.com/filecoin-project/lotus/chain/types"
)
-var Methods = builtin11.MethodsAccount
+var Methods = builtin12.MethodsAccount
func Load(store adt.Store, act *types.Actor) (State, error) {
if name, av, ok := actors.GetActorMetaByCode(act.Code); ok {
@@ -44,6 +44,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version11:
return load11(store, act.Head)
+ case actorstypes.Version12:
+ return load12(store, act.Head)
+
}
}
@@ -111,6 +114,9 @@ func MakeState(store adt.Store, av actorstypes.Version, addr address.Address) (S
case actorstypes.Version11:
return make11(store, addr)
+ case actorstypes.Version12:
+ return make12(store, addr)
+
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@@ -139,5 +145,6 @@ func AllCodes() []cid.Cid {
(&state9{}).Code(),
(&state10{}).Code(),
(&state11{}).Code(),
+ (&state12{}).Code(),
}
}
diff --git a/chain/actors/builtin/account/v12.go b/chain/actors/builtin/account/v12.go
new file mode 100644
index 000000000..af2c4186f
--- /dev/null
+++ b/chain/actors/builtin/account/v12.go
@@ -0,0 +1,62 @@
+package account
+
+import (
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-address"
+ actorstypes "github.com/filecoin-project/go-state-types/actors"
+ account12 "github.com/filecoin-project/go-state-types/builtin/v12/account"
+ "github.com/filecoin-project/go-state-types/manifest"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+)
+
+var _ State = (*state12)(nil)
+
+func load12(store adt.Store, root cid.Cid) (State, error) {
+ out := state12{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make12(store adt.Store, addr address.Address) (State, error) {
+ out := state12{store: store}
+ out.State = account12.State{Address: addr}
+ return &out, nil
+}
+
+type state12 struct {
+ account12.State
+ store adt.Store
+}
+
+func (s *state12) PubkeyAddress() (address.Address, error) {
+ return s.Address, nil
+}
+
+func (s *state12) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state12) ActorKey() string {
+ return manifest.AccountKey
+}
+
+func (s *state12) ActorVersion() actorstypes.Version {
+ return actorstypes.Version12
+}
+
+func (s *state12) Code() cid.Cid {
+ code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
+ if !ok {
+ panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
+ }
+
+ return code
+}
diff --git a/chain/actors/builtin/cron/cron.go b/chain/actors/builtin/cron/cron.go
index c2f758698..17b291788 100644
--- a/chain/actors/builtin/cron/cron.go
+++ b/chain/actors/builtin/cron/cron.go
@@ -5,7 +5,7 @@ import (
"golang.org/x/xerrors"
actorstypes "github.com/filecoin-project/go-state-types/actors"
- builtin11 "github.com/filecoin-project/go-state-types/builtin"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
@@ -40,6 +40,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version11:
return load11(store, act.Head)
+ case actorstypes.Version12:
+ return load12(store, act.Head)
+
}
}
@@ -107,13 +110,16 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) {
case actorstypes.Version11:
return make11(store)
+ case actorstypes.Version12:
+ return make12(store)
+
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
var (
- Address = builtin11.CronActorAddr
- Methods = builtin11.MethodsCron
+ Address = builtin12.CronActorAddr
+ Methods = builtin12.MethodsCron
)
type State interface {
@@ -137,5 +143,6 @@ func AllCodes() []cid.Cid {
(&state9{}).Code(),
(&state10{}).Code(),
(&state11{}).Code(),
+ (&state12{}).Code(),
}
}
diff --git a/chain/actors/builtin/cron/v12.go b/chain/actors/builtin/cron/v12.go
new file mode 100644
index 000000000..44f018d68
--- /dev/null
+++ b/chain/actors/builtin/cron/v12.go
@@ -0,0 +1,57 @@
+package cron
+
+import (
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+
+ actorstypes "github.com/filecoin-project/go-state-types/actors"
+ cron12 "github.com/filecoin-project/go-state-types/builtin/v12/cron"
+ "github.com/filecoin-project/go-state-types/manifest"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+)
+
+var _ State = (*state12)(nil)
+
+func load12(store adt.Store, root cid.Cid) (State, error) {
+ out := state12{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make12(store adt.Store) (State, error) {
+ out := state12{store: store}
+ out.State = *cron12.ConstructState(cron12.BuiltInEntries())
+ return &out, nil
+}
+
+type state12 struct {
+ cron12.State
+ store adt.Store
+}
+
+func (s *state12) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state12) ActorKey() string {
+ return manifest.CronKey
+}
+
+func (s *state12) ActorVersion() actorstypes.Version {
+ return actorstypes.Version12
+}
+
+func (s *state12) Code() cid.Cid {
+ code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
+ if !ok {
+ panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
+ }
+
+ return code
+}
diff --git a/chain/actors/builtin/datacap/datacap.go b/chain/actors/builtin/datacap/datacap.go
index 3cf557e6c..0c8f04bbf 100644
--- a/chain/actors/builtin/datacap/datacap.go
+++ b/chain/actors/builtin/datacap/datacap.go
@@ -7,7 +7,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
- builtin11 "github.com/filecoin-project/go-state-types/builtin"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest"
@@ -17,8 +17,8 @@ import (
)
var (
- Address = builtin11.DatacapActorAddr
- Methods = builtin11.MethodsDatacap
+ Address = builtin12.DatacapActorAddr
+ Methods = builtin12.MethodsDatacap
)
func Load(store adt.Store, act *types.Actor) (State, error) {
@@ -38,6 +38,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version11:
return load11(store, act.Head)
+ case actorstypes.Version12:
+ return load12(store, act.Head)
+
}
}
@@ -56,6 +59,9 @@ func MakeState(store adt.Store, av actorstypes.Version, governor address.Address
case actorstypes.Version11:
return make11(store, governor, bitwidth)
+ case actorstypes.Version12:
+ return make12(store, governor, bitwidth)
+
default:
return nil, xerrors.Errorf("datacap actor only valid for actors v9 and above, got %d", av)
}
@@ -79,5 +85,6 @@ func AllCodes() []cid.Cid {
(&state9{}).Code(),
(&state10{}).Code(),
(&state11{}).Code(),
+ (&state12{}).Code(),
}
}
diff --git a/chain/actors/builtin/datacap/v12.go b/chain/actors/builtin/datacap/v12.go
new file mode 100644
index 000000000..91563a2b6
--- /dev/null
+++ b/chain/actors/builtin/datacap/v12.go
@@ -0,0 +1,82 @@
+package datacap
+
+import (
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ actorstypes "github.com/filecoin-project/go-state-types/actors"
+ datacap12 "github.com/filecoin-project/go-state-types/builtin/v12/datacap"
+ adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
+ "github.com/filecoin-project/go-state-types/manifest"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+)
+
+var _ State = (*state12)(nil)
+
+func load12(store adt.Store, root cid.Cid) (State, error) {
+ out := state12{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make12(store adt.Store, governor address.Address, bitwidth uint64) (State, error) {
+ out := state12{store: store}
+ s, err := datacap12.ConstructState(store, governor, bitwidth)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state12 struct {
+ datacap12.State
+ store adt.Store
+}
+
+func (s *state12) Governor() (address.Address, error) {
+ return s.State.Governor, nil
+}
+
+func (s *state12) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state12) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ return forEachClient(s.store, actors.Version12, s.verifiedClients, cb)
+}
+
+func (s *state12) verifiedClients() (adt.Map, error) {
+ return adt12.AsMap(s.store, s.Token.Balances, int(s.Token.HamtBitWidth))
+}
+
+func (s *state12) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
+ return getDataCap(s.store, actors.Version12, s.verifiedClients, addr)
+}
+
+func (s *state12) ActorKey() string {
+ return manifest.DatacapKey
+}
+
+func (s *state12) ActorVersion() actorstypes.Version {
+ return actorstypes.Version12
+}
+
+func (s *state12) Code() cid.Cid {
+ code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
+ if !ok {
+ panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
+ }
+
+ return code
+}
diff --git a/chain/actors/builtin/evm/evm.go b/chain/actors/builtin/evm/evm.go
index 7c28295f2..98f860cac 100644
--- a/chain/actors/builtin/evm/evm.go
+++ b/chain/actors/builtin/evm/evm.go
@@ -5,7 +5,7 @@ import (
"golang.org/x/xerrors"
actorstypes "github.com/filecoin-project/go-state-types/actors"
- builtin11 "github.com/filecoin-project/go-state-types/builtin"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest"
@@ -14,7 +14,7 @@ import (
"github.com/filecoin-project/lotus/chain/types"
)
-var Methods = builtin11.MethodsEVM
+var Methods = builtin12.MethodsEVM
func Load(store adt.Store, act *types.Actor) (State, error) {
if name, av, ok := actors.GetActorMetaByCode(act.Code); ok {
@@ -30,6 +30,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version11:
return load11(store, act.Head)
+ case actorstypes.Version12:
+ return load12(store, act.Head)
+
}
}
@@ -45,6 +48,9 @@ func MakeState(store adt.Store, av actorstypes.Version, bytecode cid.Cid) (State
case actorstypes.Version11:
return make11(store, bytecode)
+ case actorstypes.Version12:
+ return make12(store, bytecode)
+
default:
return nil, xerrors.Errorf("evm actor only valid for actors v10 and above, got %d", av)
}
diff --git a/chain/actors/builtin/evm/v12.go b/chain/actors/builtin/evm/v12.go
new file mode 100644
index 000000000..a107368fa
--- /dev/null
+++ b/chain/actors/builtin/evm/v12.go
@@ -0,0 +1,72 @@
+package evm
+
+import (
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ evm12 "github.com/filecoin-project/go-state-types/builtin/v12/evm"
+
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+)
+
+var _ State = (*state12)(nil)
+
+func load12(store adt.Store, root cid.Cid) (State, error) {
+ out := state12{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make12(store adt.Store, bytecode cid.Cid) (State, error) {
+ out := state12{store: store}
+ s, err := evm12.ConstructState(store, bytecode)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state12 struct {
+ evm12.State
+ store adt.Store
+}
+
+func (s *state12) Nonce() (uint64, error) {
+ return s.State.Nonce, nil
+}
+
+func (s *state12) IsAlive() (bool, error) {
+ return s.State.Tombstone == nil, nil
+}
+
+func (s *state12) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state12) GetBytecodeCID() (cid.Cid, error) {
+ return s.State.Bytecode, nil
+}
+
+func (s *state12) GetBytecodeHash() ([32]byte, error) {
+ return s.State.BytecodeHash, nil
+}
+
+func (s *state12) GetBytecode() ([]byte, error) {
+ bc, err := s.GetBytecodeCID()
+ if err != nil {
+ return nil, err
+ }
+
+ var byteCode abi.CborBytesTransparent
+ if err := s.store.Get(s.store.Context(), bc, &byteCode); err != nil {
+ return nil, err
+ }
+
+ return byteCode, nil
+}
diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go
index 2d9e41275..41a763ecf 100644
--- a/chain/actors/builtin/init/init.go
+++ b/chain/actors/builtin/init/init.go
@@ -7,7 +7,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
- builtin11 "github.com/filecoin-project/go-state-types/builtin"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@@ -25,8 +25,8 @@ import (
)
var (
- Address = builtin11.InitActorAddr
- Methods = builtin11.MethodsInit
+ Address = builtin12.InitActorAddr
+ Methods = builtin12.MethodsInit
)
func Load(store adt.Store, act *types.Actor) (State, error) {
@@ -49,6 +49,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version11:
return load11(store, act.Head)
+ case actorstypes.Version12:
+ return load12(store, act.Head)
+
}
}
@@ -116,6 +119,9 @@ func MakeState(store adt.Store, av actorstypes.Version, networkName string) (Sta
case actorstypes.Version11:
return make11(store, networkName)
+ case actorstypes.Version12:
+ return make12(store, networkName)
+
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@@ -167,5 +173,6 @@ func AllCodes() []cid.Cid {
(&state9{}).Code(),
(&state10{}).Code(),
(&state11{}).Code(),
+ (&state12{}).Code(),
}
}
diff --git a/chain/actors/builtin/init/v12.go b/chain/actors/builtin/init/v12.go
new file mode 100644
index 000000000..3eab7a740
--- /dev/null
+++ b/chain/actors/builtin/init/v12.go
@@ -0,0 +1,147 @@
+package init
+
+import (
+ "crypto/sha256"
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ actorstypes "github.com/filecoin-project/go-state-types/actors"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
+ init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
+ adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
+ "github.com/filecoin-project/go-state-types/manifest"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/node/modules/dtypes"
+)
+
+var _ State = (*state12)(nil)
+
+func load12(store adt.Store, root cid.Cid) (State, error) {
+ out := state12{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make12(store adt.Store, networkName string) (State, error) {
+ out := state12{store: store}
+
+ s, err := init12.ConstructState(store, networkName)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state12 struct {
+ init12.State
+ store adt.Store
+}
+
+func (s *state12) ResolveAddress(address address.Address) (address.Address, bool, error) {
+ return s.State.ResolveAddress(s.store, address)
+}
+
+func (s *state12) MapAddressToNewID(address address.Address) (address.Address, error) {
+ return s.State.MapAddressToNewID(s.store, address)
+}
+
+func (s *state12) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
+ addrs, err := adt12.AsMap(s.store, s.State.AddressMap, builtin12.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+ var actorID cbg.CborInt
+ return addrs.ForEach(&actorID, func(key string) error {
+ addr, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ return cb(abi.ActorID(actorID), addr)
+ })
+}
+
+func (s *state12) NetworkName() (dtypes.NetworkName, error) {
+ return dtypes.NetworkName(s.State.NetworkName), nil
+}
+
+func (s *state12) SetNetworkName(name string) error {
+ s.State.NetworkName = name
+ return nil
+}
+
+func (s *state12) SetNextID(id abi.ActorID) error {
+ s.State.NextID = id
+ return nil
+}
+
+func (s *state12) Remove(addrs ...address.Address) (err error) {
+ m, err := adt12.AsMap(s.store, s.State.AddressMap, builtin12.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+ for _, addr := range addrs {
+ if err = m.Delete(abi.AddrKey(addr)); err != nil {
+ return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
+ }
+ }
+ amr, err := m.Root()
+ if err != nil {
+ return xerrors.Errorf("failed to get address map root: %w", err)
+ }
+ s.State.AddressMap = amr
+ return nil
+}
+
+func (s *state12) SetAddressMap(mcid cid.Cid) error {
+ s.State.AddressMap = mcid
+ return nil
+}
+
+func (s *state12) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state12) AddressMap() (adt.Map, error) {
+ return adt12.AsMap(s.store, s.State.AddressMap, builtin12.DefaultHamtBitwidth)
+}
+
+func (s *state12) AddressMapBitWidth() int {
+ return builtin12.DefaultHamtBitwidth
+}
+
+func (s *state12) AddressMapHashFunction() func(input []byte) []byte {
+ return func(input []byte) []byte {
+ res := sha256.Sum256(input)
+ return res[:]
+ }
+}
+
+func (s *state12) ActorKey() string {
+ return manifest.InitKey
+}
+
+func (s *state12) ActorVersion() actorstypes.Version {
+ return actorstypes.Version12
+}
+
+func (s *state12) Code() cid.Cid {
+ code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
+ if !ok {
+ panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
+ }
+
+ return code
+}
diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go
index 36936e787..39473d560 100644
--- a/chain/actors/builtin/market/market.go
+++ b/chain/actors/builtin/market/market.go
@@ -55,6 +55,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version11:
return load11(store, act.Head)
+ case actorstypes.Version12:
+ return load12(store, act.Head)
+
}
}
@@ -122,6 +125,9 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) {
case actorstypes.Version11:
return make11(store)
+ case actorstypes.Version12:
+ return make12(store)
+
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@@ -217,6 +223,9 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora
case actorstypes.Version11:
return decodePublishStorageDealsReturn11(b)
+ case actorstypes.Version12:
+ return decodePublishStorageDealsReturn12(b)
+
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@@ -303,5 +312,6 @@ func AllCodes() []cid.Cid {
(&state9{}).Code(),
(&state10{}).Code(),
(&state11{}).Code(),
+ (&state12{}).Code(),
}
}
diff --git a/chain/actors/builtin/market/v12.go b/chain/actors/builtin/market/v12.go
new file mode 100644
index 000000000..3532fc4f4
--- /dev/null
+++ b/chain/actors/builtin/market/v12.go
@@ -0,0 +1,377 @@
+package market
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
+ "github.com/filecoin-project/go-state-types/abi"
+ actorstypes "github.com/filecoin-project/go-state-types/actors"
+ "github.com/filecoin-project/go-state-types/builtin"
+ market12 "github.com/filecoin-project/go-state-types/builtin/v12/market"
+ adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
+ markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"
+ verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
+ "github.com/filecoin-project/go-state-types/manifest"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+var _ State = (*state12)(nil)
+
+func load12(store adt.Store, root cid.Cid) (State, error) {
+ out := state12{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make12(store adt.Store) (State, error) {
+ out := state12{store: store}
+
+ s, err := market12.ConstructState(store)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state12 struct {
+ market12.State
+ store adt.Store
+}
+
+func (s *state12) TotalLocked() (abi.TokenAmount, error) {
+ fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
+ fml = types.BigAdd(fml, s.TotalClientStorageFee)
+ return fml, nil
+}
+
+func (s *state12) BalancesChanged(otherState State) (bool, error) {
+ otherState12, ok := otherState.(*state12)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.EscrowTable.Equals(otherState12.State.EscrowTable) || !s.State.LockedTable.Equals(otherState12.State.LockedTable), nil
+}
+
+func (s *state12) StatesChanged(otherState State) (bool, error) {
+ otherState12, ok := otherState.(*state12)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.States.Equals(otherState12.State.States), nil
+}
+
+func (s *state12) States() (DealStates, error) {
+ stateArray, err := adt12.AsArray(s.store, s.State.States, market12.StatesAmtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+ return &dealStates12{stateArray}, nil
+}
+
+func (s *state12) ProposalsChanged(otherState State) (bool, error) {
+ otherState12, ok := otherState.(*state12)
+ if !ok {
+ // there's no way to compare different versions of the state, so let's
+ // just say that means the state of balances has changed
+ return true, nil
+ }
+ return !s.State.Proposals.Equals(otherState12.State.Proposals), nil
+}
+
+func (s *state12) Proposals() (DealProposals, error) {
+ proposalArray, err := adt12.AsArray(s.store, s.State.Proposals, market12.ProposalsAmtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+ return &dealProposals12{proposalArray}, nil
+}
+
+func (s *state12) EscrowTable() (BalanceTable, error) {
+ bt, err := adt12.AsBalanceTable(s.store, s.State.EscrowTable)
+ if err != nil {
+ return nil, err
+ }
+ return &balanceTable12{bt}, nil
+}
+
+func (s *state12) LockedTable() (BalanceTable, error) {
+ bt, err := adt12.AsBalanceTable(s.store, s.State.LockedTable)
+ if err != nil {
+ return nil, err
+ }
+ return &balanceTable12{bt}, nil
+}
+
+func (s *state12) VerifyDealsForActivation(
+ minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
+) (weight, verifiedWeight abi.DealWeight, err error) {
+ w, vw, _, err := market12.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
+ return w, vw, err
+}
+
+func (s *state12) NextID() (abi.DealID, error) {
+ return s.State.NextID, nil
+}
+
+type balanceTable12 struct {
+ *adt12.BalanceTable
+}
+
+func (bt *balanceTable12) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
+ asMap := (*adt12.Map)(bt.BalanceTable)
+ var ta abi.TokenAmount
+ return asMap.ForEach(&ta, func(key string) error {
+ a, err := address.NewFromBytes([]byte(key))
+ if err != nil {
+ return err
+ }
+ return cb(a, ta)
+ })
+}
+
+type dealStates12 struct {
+ adt.Array
+}
+
+func (s *dealStates12) Get(dealID abi.DealID) (*DealState, bool, error) {
+ var deal12 market12.DealState
+ found, err := s.Array.Get(uint64(dealID), &deal12)
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, nil
+ }
+ deal := fromV12DealState(deal12)
+ return &deal, true, nil
+}
+
+func (s *dealStates12) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
+ var ds12 market12.DealState
+ return s.Array.ForEach(&ds12, func(idx int64) error {
+ return cb(abi.DealID(idx), fromV12DealState(ds12))
+ })
+}
+
+func (s *dealStates12) decode(val *cbg.Deferred) (*DealState, error) {
+ var ds12 market12.DealState
+ if err := ds12.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return nil, err
+ }
+ ds := fromV12DealState(ds12)
+ return &ds, nil
+}
+
+func (s *dealStates12) array() adt.Array {
+ return s.Array
+}
+
+func fromV12DealState(v12 market12.DealState) DealState {
+ ret := DealState{
+ SectorStartEpoch: v12.SectorStartEpoch,
+ LastUpdatedEpoch: v12.LastUpdatedEpoch,
+ SlashEpoch: v12.SlashEpoch,
+ VerifiedClaim: 0,
+ }
+
+ ret.VerifiedClaim = verifregtypes.AllocationId(v12.VerifiedClaim)
+
+ return ret
+}
+
+type dealProposals12 struct {
+ adt.Array
+}
+
+func (s *dealProposals12) Get(dealID abi.DealID) (*DealProposal, bool, error) {
+ var proposal12 market12.DealProposal
+ found, err := s.Array.Get(uint64(dealID), &proposal12)
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, nil
+ }
+
+ proposal, err := fromV12DealProposal(proposal12)
+ if err != nil {
+ return nil, true, xerrors.Errorf("decoding proposal: %w", err)
+ }
+
+ return &proposal, true, nil
+}
+
+func (s *dealProposals12) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
+ var dp12 market12.DealProposal
+ return s.Array.ForEach(&dp12, func(idx int64) error {
+ dp, err := fromV12DealProposal(dp12)
+ if err != nil {
+ return xerrors.Errorf("decoding proposal: %w", err)
+ }
+
+ return cb(abi.DealID(idx), dp)
+ })
+}
+
+func (s *dealProposals12) decode(val *cbg.Deferred) (*DealProposal, error) {
+ var dp12 market12.DealProposal
+ if err := dp12.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return nil, err
+ }
+
+ dp, err := fromV12DealProposal(dp12)
+ if err != nil {
+ return nil, err
+ }
+
+ return &dp, nil
+}
+
+func (s *dealProposals12) array() adt.Array {
+ return s.Array
+}
+
+func fromV12DealProposal(v12 market12.DealProposal) (DealProposal, error) {
+
+ label, err := fromV12Label(v12.Label)
+
+ if err != nil {
+ return DealProposal{}, xerrors.Errorf("error setting deal label: %w", err)
+ }
+
+ return DealProposal{
+ PieceCID: v12.PieceCID,
+ PieceSize: v12.PieceSize,
+ VerifiedDeal: v12.VerifiedDeal,
+ Client: v12.Client,
+ Provider: v12.Provider,
+
+ Label: label,
+
+ StartEpoch: v12.StartEpoch,
+ EndEpoch: v12.EndEpoch,
+ StoragePricePerEpoch: v12.StoragePricePerEpoch,
+
+ ProviderCollateral: v12.ProviderCollateral,
+ ClientCollateral: v12.ClientCollateral,
+ }, nil
+}
+
+func fromV12Label(v12 market12.DealLabel) (DealLabel, error) {
+ if v12.IsString() {
+ str, err := v12.ToString()
+ if err != nil {
+ return markettypes.EmptyDealLabel, xerrors.Errorf("failed to convert string label to string: %w", err)
+ }
+ return markettypes.NewLabelFromString(str)
+ }
+
+ bs, err := v12.ToBytes()
+ if err != nil {
+ return markettypes.EmptyDealLabel, xerrors.Errorf("failed to convert bytes label to bytes: %w", err)
+ }
+ return markettypes.NewLabelFromBytes(bs)
+}
+
+func (s *state12) GetState() interface{} {
+ return &s.State
+}
+
+var _ PublishStorageDealsReturn = (*publishStorageDealsReturn12)(nil)
+
+func decodePublishStorageDealsReturn12(b []byte) (PublishStorageDealsReturn, error) {
+ var retval market12.PublishStorageDealsReturn
+ if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
+ return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err)
+ }
+
+ return &publishStorageDealsReturn12{retval}, nil
+}
+
+type publishStorageDealsReturn12 struct {
+ market12.PublishStorageDealsReturn
+}
+
+func (r *publishStorageDealsReturn12) IsDealValid(index uint64) (bool, int, error) {
+
+ set, err := r.ValidDeals.IsSet(index)
+ if err != nil || !set {
+ return false, -1, err
+ }
+ maskBf, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{
+ Runs: []rlepluslazy.Run{rlepluslazy.Run{Val: true, Len: index}}})
+ if err != nil {
+ return false, -1, err
+ }
+ before, err := bitfield.IntersectBitField(maskBf, r.ValidDeals)
+ if err != nil {
+ return false, -1, err
+ }
+ outIdx, err := before.Count()
+ if err != nil {
+ return false, -1, err
+ }
+ return set, int(outIdx), nil
+
+}
+
+func (r *publishStorageDealsReturn12) DealIDs() ([]abi.DealID, error) {
+ return r.IDs, nil
+}
+
+func (s *state12) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) {
+
+ allocations, err := adt12.AsMap(s.store, s.PendingDealAllocationIds, builtin.DefaultHamtBitwidth)
+ if err != nil {
+ return verifregtypes.NoAllocationID, xerrors.Errorf("failed to load allocation id for %d: %w", dealId, err)
+ }
+
+ var allocationId cbg.CborInt
+ found, err := allocations.Get(abi.UIntKey(uint64(dealId)), &allocationId)
+ if err != nil {
+ return verifregtypes.NoAllocationID, xerrors.Errorf("failed to load allocation id for %d: %w", dealId, err)
+ }
+ if !found {
+ return verifregtypes.NoAllocationID, nil
+ }
+
+ return verifregtypes.AllocationId(allocationId), nil
+
+}
+
+func (s *state12) ActorKey() string {
+ return manifest.MarketKey
+}
+
+func (s *state12) ActorVersion() actorstypes.Version {
+ return actorstypes.Version12
+}
+
+func (s *state12) Code() cid.Cid {
+ code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
+ if !ok {
+ panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
+ }
+
+ return code
+}
diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go
index 1433945d9..3ad17b033 100644
--- a/chain/actors/builtin/miner/miner.go
+++ b/chain/actors/builtin/miner/miner.go
@@ -48,6 +48,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version11:
return load11(store, act.Head)
+ case actorstypes.Version12:
+ return load12(store, act.Head)
+
}
}
@@ -115,6 +118,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
case actors.Version11:
return make11(store)
+ case actors.Version12:
+ return make12(store)
+
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@@ -321,5 +327,6 @@ func AllCodes() []cid.Cid {
(&state9{}).Code(),
(&state10{}).Code(),
(&state11{}).Code(),
+ (&state12{}).Code(),
}
}
diff --git a/chain/actors/builtin/miner/v12.go b/chain/actors/builtin/miner/v12.go
new file mode 100644
index 000000000..787da7d0f
--- /dev/null
+++ b/chain/actors/builtin/miner/v12.go
@@ -0,0 +1,591 @@
+package miner
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-bitfield"
+ rle "github.com/filecoin-project/go-bitfield/rle"
+ "github.com/filecoin-project/go-state-types/abi"
+ actorstypes "github.com/filecoin-project/go-state-types/actors"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
+ miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner"
+ adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
+ "github.com/filecoin-project/go-state-types/dline"
+ "github.com/filecoin-project/go-state-types/manifest"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+)
+
+var _ State = (*state12)(nil)
+
+func load12(store adt.Store, root cid.Cid) (State, error) {
+ out := state12{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make12(store adt.Store) (State, error) {
+ out := state12{store: store}
+ out.State = miner12.State{}
+ return &out, nil
+}
+
+type state12 struct {
+ miner12.State
+ store adt.Store
+}
+
+type deadline12 struct {
+ miner12.Deadline
+ store adt.Store
+}
+
+type partition12 struct {
+ miner12.Partition
+ store adt.Store
+}
+
+func (s *state12) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = xerrors.Errorf("failed to get available balance: %w", r)
+ available = abi.NewTokenAmount(0)
+ }
+ }()
+ // this panics if the miner doesnt have enough funds to cover their locked pledge
+ available, err = s.GetAvailableBalance(bal)
+ return available, err
+}
+
+func (s *state12) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
+ return s.CheckVestedFunds(s.store, epoch)
+}
+
+func (s *state12) LockedFunds() (LockedFunds, error) {
+ return LockedFunds{
+ VestingFunds: s.State.LockedFunds,
+ InitialPledgeRequirement: s.State.InitialPledge,
+ PreCommitDeposits: s.State.PreCommitDeposits,
+ }, nil
+}
+
+func (s *state12) FeeDebt() (abi.TokenAmount, error) {
+ return s.State.FeeDebt, nil
+}
+
+func (s *state12) InitialPledge() (abi.TokenAmount, error) {
+ return s.State.InitialPledge, nil
+}
+
+func (s *state12) PreCommitDeposits() (abi.TokenAmount, error) {
+ return s.State.PreCommitDeposits, nil
+}
+
+// Returns nil, nil if sector is not found
+func (s *state12) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
+ info, ok, err := s.State.GetSector(s.store, num)
+ if !ok || err != nil {
+ return nil, err
+ }
+
+ ret := fromV12SectorOnChainInfo(*info)
+ return &ret, nil
+}
+
+func (s *state12) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
+ dlIdx, partIdx, err := s.State.FindSector(s.store, num)
+ if err != nil {
+ return nil, err
+ }
+ return &SectorLocation{
+ Deadline: dlIdx,
+ Partition: partIdx,
+ }, nil
+}
+
+func (s *state12) NumLiveSectors() (uint64, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return 0, err
+ }
+ var total uint64
+ if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner12.Deadline) error {
+ total += dl.LiveSectors
+ return nil
+ }); err != nil {
+ return 0, err
+ }
+ return total, nil
+}
+
+// GetSectorExpiration returns the effective expiration of the given sector.
+//
+// If the sector does not expire early, the Early expiration field is 0.
+func (s *state12) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return nil, err
+ }
+ // NOTE: this can be optimized significantly.
+ // 1. If the sector is non-faulty, it will expire on-time (can be
+ // learned from the sector info).
+ // 2. If it's faulty, it will expire early within the first 42 entries
+ // of the expiration queue.
+
+ stopErr := errors.New("stop")
+ out := SectorExpiration{}
+ err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner12.Deadline) error {
+ partitions, err := dl.PartitionsArray(s.store)
+ if err != nil {
+ return err
+ }
+ quant := s.State.QuantSpecForDeadline(dlIdx)
+ var part miner12.Partition
+ return partitions.ForEach(&part, func(partIdx int64) error {
+ if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if !found {
+ return nil
+ }
+ if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
+ return err
+ } else if found {
+ // already terminated
+ return stopErr
+ }
+
+ q, err := miner12.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner12.PartitionExpirationAmtBitwidth)
+ if err != nil {
+ return err
+ }
+ var exp miner12.ExpirationSet
+ return q.ForEach(&exp, func(epoch int64) error {
+ if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if early {
+ out.Early = abi.ChainEpoch(epoch)
+ return nil
+ }
+ if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
+ return err
+ } else if onTime {
+ out.OnTime = abi.ChainEpoch(epoch)
+ return stopErr
+ }
+ return nil
+ })
+ })
+ })
+ if err == stopErr {
+ err = nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ if out.Early == 0 && out.OnTime == 0 {
+ return nil, xerrors.Errorf("failed to find sector %d", num)
+ }
+ return &out, nil
+}
+
+func (s *state12) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
+ info, ok, err := s.State.GetPrecommittedSector(s.store, num)
+ if !ok || err != nil {
+ return nil, err
+ }
+
+ ret := fromV12SectorPreCommitOnChainInfo(*info)
+
+ return &ret, nil
+}
+
+func (s *state12) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
+ precommitted, err := adt12.AsMap(s.store, s.State.PreCommittedSectors, builtin12.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+
+ var info miner12.SectorPreCommitOnChainInfo
+ if err := precommitted.ForEach(&info, func(_ string) error {
+ return cb(fromV12SectorPreCommitOnChainInfo(info))
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (s *state12) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
+ sectors, err := miner12.LoadSectors(s.store, s.State.Sectors)
+ if err != nil {
+ return nil, err
+ }
+
+ // If no sector numbers are specified, load all.
+ if snos == nil {
+ infos := make([]*SectorOnChainInfo, 0, sectors.Length())
+ var info12 miner12.SectorOnChainInfo
+ if err := sectors.ForEach(&info12, func(_ int64) error {
+ info := fromV12SectorOnChainInfo(info12)
+ infos = append(infos, &info)
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return infos, nil
+ }
+
+ // Otherwise, load selected.
+ infos12, err := sectors.Load(*snos)
+ if err != nil {
+ return nil, err
+ }
+ infos := make([]*SectorOnChainInfo, len(infos12))
+ for i, info12 := range infos12 {
+ info := fromV12SectorOnChainInfo(*info12)
+ infos[i] = &info
+ }
+ return infos, nil
+}
+
+func (s *state12) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
+ var allocatedSectors bitfield.BitField
+ err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
+ return allocatedSectors, err
+}
+
+func (s *state12) IsAllocated(num abi.SectorNumber) (bool, error) {
+ allocatedSectors, err := s.loadAllocatedSectorNumbers()
+ if err != nil {
+ return false, err
+ }
+
+ return allocatedSectors.IsSet(uint64(num))
+}
+
+func (s *state12) GetProvingPeriodStart() (abi.ChainEpoch, error) {
+ return s.State.ProvingPeriodStart, nil
+}
+
+func (s *state12) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
+ allocatedSectors, err := s.loadAllocatedSectorNumbers()
+ if err != nil {
+ return nil, err
+ }
+
+ allocatedRuns, err := allocatedSectors.RunIterator()
+ if err != nil {
+ return nil, err
+ }
+
+ unallocatedRuns, err := rle.Subtract(
+ &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
+ allocatedRuns,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ iter, err := rle.BitsFromRuns(unallocatedRuns)
+ if err != nil {
+ return nil, err
+ }
+
+ sectors := make([]abi.SectorNumber, 0, count)
+ for iter.HasNext() && len(sectors) < count {
+ nextNo, err := iter.Next()
+ if err != nil {
+ return nil, err
+ }
+ sectors = append(sectors, abi.SectorNumber(nextNo))
+ }
+
+ return sectors, nil
+}
+
+func (s *state12) GetAllocatedSectors() (*bitfield.BitField, error) {
+ var allocatedSectors bitfield.BitField
+ if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
+ return nil, err
+ }
+
+ return &allocatedSectors, nil
+}
+
+func (s *state12) LoadDeadline(idx uint64) (Deadline, error) {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return nil, err
+ }
+ dl, err := dls.LoadDeadline(s.store, idx)
+ if err != nil {
+ return nil, err
+ }
+ return &deadline12{*dl, s.store}, nil
+}
+
+func (s *state12) ForEachDeadline(cb func(uint64, Deadline) error) error {
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return err
+ }
+ return dls.ForEach(s.store, func(i uint64, dl *miner12.Deadline) error {
+ return cb(i, &deadline12{*dl, s.store})
+ })
+}
+
+func (s *state12) NumDeadlines() (uint64, error) {
+ return miner12.WPoStPeriodDeadlines, nil
+}
+
+func (s *state12) DeadlinesChanged(other State) (bool, error) {
+ other12, ok := other.(*state12)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+
+ return !s.State.Deadlines.Equals(other12.Deadlines), nil
+}
+
+func (s *state12) MinerInfoChanged(other State) (bool, error) {
+ other0, ok := other.(*state12)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.Info.Equals(other0.State.Info), nil
+}
+
+func (s *state12) Info() (MinerInfo, error) {
+ info, err := s.State.GetInfo(s.store)
+ if err != nil {
+ return MinerInfo{}, err
+ }
+
+ mi := MinerInfo{
+ Owner: info.Owner,
+ Worker: info.Worker,
+ ControlAddresses: info.ControlAddresses,
+
+ PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey),
+
+ PeerId: info.PeerId,
+ Multiaddrs: info.Multiaddrs,
+ WindowPoStProofType: info.WindowPoStProofType,
+ SectorSize: info.SectorSize,
+ WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
+ ConsensusFaultElapsed: info.ConsensusFaultElapsed,
+
+ Beneficiary: info.Beneficiary,
+ BeneficiaryTerm: BeneficiaryTerm(info.BeneficiaryTerm),
+ PendingBeneficiaryTerm: (*PendingBeneficiaryChange)(info.PendingBeneficiaryTerm),
+ }
+
+ return mi, nil
+}
+
+func (s *state12) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
+ return s.State.RecordedDeadlineInfo(epoch), nil
+}
+
+func (s *state12) DeadlineCronActive() (bool, error) {
+ return s.State.DeadlineCronActive, nil
+}
+
+func (s *state12) sectors() (adt.Array, error) {
+ return adt12.AsArray(s.store, s.Sectors, miner12.SectorsAmtBitwidth)
+}
+
+func (s *state12) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
+ var si miner12.SectorOnChainInfo
+ err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
+ if err != nil {
+ return SectorOnChainInfo{}, err
+ }
+
+ return fromV12SectorOnChainInfo(si), nil
+}
+
+func (s *state12) precommits() (adt.Map, error) {
+ return adt12.AsMap(s.store, s.PreCommittedSectors, builtin12.DefaultHamtBitwidth)
+}
+
+func (s *state12) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
+ var sp miner12.SectorPreCommitOnChainInfo
+ err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
+ if err != nil {
+ return SectorPreCommitOnChainInfo{}, err
+ }
+
+ return fromV12SectorPreCommitOnChainInfo(sp), nil
+}
+
+func (s *state12) EraseAllUnproven() error {
+
+ dls, err := s.State.LoadDeadlines(s.store)
+ if err != nil {
+ return err
+ }
+
+ err = dls.ForEach(s.store, func(dindx uint64, dl *miner12.Deadline) error {
+ ps, err := dl.PartitionsArray(s.store)
+ if err != nil {
+ return err
+ }
+
+ var part miner12.Partition
+ err = ps.ForEach(&part, func(pindx int64) error {
+ _ = part.ActivateUnproven()
+ err = ps.Set(uint64(pindx), &part)
+ return nil
+ })
+
+ if err != nil {
+ return err
+ }
+
+ dl.Partitions, err = ps.Root()
+ if err != nil {
+ return err
+ }
+
+ return dls.UpdateDeadline(s.store, dindx, dl)
+ })
+ if err != nil {
+ return err
+ }
+
+ return s.State.SaveDeadlines(s.store, dls)
+
+}
+
+func (d *deadline12) LoadPartition(idx uint64) (Partition, error) {
+ p, err := d.Deadline.LoadPartition(d.store, idx)
+ if err != nil {
+ return nil, err
+ }
+ return &partition12{*p, d.store}, nil
+}
+
+func (d *deadline12) ForEachPartition(cb func(uint64, Partition) error) error {
+ ps, err := d.Deadline.PartitionsArray(d.store)
+ if err != nil {
+ return err
+ }
+ var part miner12.Partition
+ return ps.ForEach(&part, func(i int64) error {
+ return cb(uint64(i), &partition12{part, d.store})
+ })
+}
+
+func (d *deadline12) PartitionsChanged(other Deadline) (bool, error) {
+ other12, ok := other.(*deadline12)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+
+ return !d.Deadline.Partitions.Equals(other12.Deadline.Partitions), nil
+}
+
+func (d *deadline12) PartitionsPoSted() (bitfield.BitField, error) {
+ return d.Deadline.PartitionsPoSted, nil
+}
+
+func (d *deadline12) DisputableProofCount() (uint64, error) {
+
+ ops, err := d.OptimisticProofsSnapshotArray(d.store)
+ if err != nil {
+ return 0, err
+ }
+
+ return ops.Length(), nil
+
+}
+
+func (p *partition12) AllSectors() (bitfield.BitField, error) {
+ return p.Partition.Sectors, nil
+}
+
+func (p *partition12) FaultySectors() (bitfield.BitField, error) {
+ return p.Partition.Faults, nil
+}
+
+func (p *partition12) RecoveringSectors() (bitfield.BitField, error) {
+ return p.Partition.Recoveries, nil
+}
+
+func (p *partition12) UnprovenSectors() (bitfield.BitField, error) {
+ return p.Partition.Unproven, nil
+}
+
+func fromV12SectorOnChainInfo(v12 miner12.SectorOnChainInfo) SectorOnChainInfo {
+ info := SectorOnChainInfo{
+ SectorNumber: v12.SectorNumber,
+ SealProof: v12.SealProof,
+ SealedCID: v12.SealedCID,
+ DealIDs: v12.DealIDs,
+ Activation: v12.Activation,
+ Expiration: v12.Expiration,
+ DealWeight: v12.DealWeight,
+ VerifiedDealWeight: v12.VerifiedDealWeight,
+ InitialPledge: v12.InitialPledge,
+ ExpectedDayReward: v12.ExpectedDayReward,
+ ExpectedStoragePledge: v12.ExpectedStoragePledge,
+
+ SectorKeyCID: v12.SectorKeyCID,
+ }
+ return info
+}
+
+func fromV12SectorPreCommitOnChainInfo(v12 miner12.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
+ ret := SectorPreCommitOnChainInfo{
+ Info: SectorPreCommitInfo{
+ SealProof: v12.Info.SealProof,
+ SectorNumber: v12.Info.SectorNumber,
+ SealedCID: v12.Info.SealedCID,
+ SealRandEpoch: v12.Info.SealRandEpoch,
+ DealIDs: v12.Info.DealIDs,
+ Expiration: v12.Info.Expiration,
+ UnsealedCid: nil,
+ },
+ PreCommitDeposit: v12.PreCommitDeposit,
+ PreCommitEpoch: v12.PreCommitEpoch,
+ }
+
+ ret.Info.UnsealedCid = v12.Info.UnsealedCid
+
+ return ret
+}
+
+func (s *state12) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state12) ActorKey() string {
+ return manifest.MinerKey
+}
+
+func (s *state12) ActorVersion() actorstypes.Version {
+ return actorstypes.Version12
+}
+
+func (s *state12) Code() cid.Cid {
+ code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
+ if !ok {
+ panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
+ }
+
+ return code
+}
diff --git a/chain/actors/builtin/multisig/message10.go b/chain/actors/builtin/multisig/message10.go
index 5f70ea3c1..8f7bb5a6f 100644
--- a/chain/actors/builtin/multisig/message10.go
+++ b/chain/actors/builtin/multisig/message10.go
@@ -8,7 +8,7 @@ import (
actorstypes "github.com/filecoin-project/go-state-types/actors"
builtintypes "github.com/filecoin-project/go-state-types/builtin"
multisig10 "github.com/filecoin-project/go-state-types/builtin/v10/multisig"
- init11 "github.com/filecoin-project/go-state-types/builtin/v11/init"
+ init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors"
@@ -57,7 +57,7 @@ func (m message10) Create(
}
// new actors are created by invoking 'exec' on the init actor with the constructor params
- execParams := &init11.ExecParams{
+ execParams := &init12.ExecParams{
CodeCID: code,
ConstructorParams: enc,
}
diff --git a/chain/actors/builtin/multisig/message11.go b/chain/actors/builtin/multisig/message11.go
index a2c086614..4c7520d5d 100644
--- a/chain/actors/builtin/multisig/message11.go
+++ b/chain/actors/builtin/multisig/message11.go
@@ -7,8 +7,8 @@ import (
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
builtintypes "github.com/filecoin-project/go-state-types/builtin"
- init11 "github.com/filecoin-project/go-state-types/builtin/v11/init"
multisig11 "github.com/filecoin-project/go-state-types/builtin/v11/multisig"
+ init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors"
@@ -57,7 +57,7 @@ func (m message11) Create(
}
// new actors are created by invoking 'exec' on the init actor with the constructor params
- execParams := &init11.ExecParams{
+ execParams := &init12.ExecParams{
CodeCID: code,
ConstructorParams: enc,
}
diff --git a/chain/actors/builtin/multisig/message12.go b/chain/actors/builtin/multisig/message12.go
new file mode 100644
index 000000000..43658c04b
--- /dev/null
+++ b/chain/actors/builtin/multisig/message12.go
@@ -0,0 +1,77 @@
+package multisig
+
+import (
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ actorstypes "github.com/filecoin-project/go-state-types/actors"
+ builtintypes "github.com/filecoin-project/go-state-types/builtin"
+ init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
+ multisig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig"
+ "github.com/filecoin-project/go-state-types/manifest"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type message12 struct{ message0 }
+
+func (m message12) Create(
+ signers []address.Address, threshold uint64,
+ unlockStart, unlockDuration abi.ChainEpoch,
+ initialAmount abi.TokenAmount,
+) (*types.Message, error) {
+
+ lenAddrs := uint64(len(signers))
+
+ if lenAddrs < threshold {
+ return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
+ }
+
+ if threshold == 0 {
+ threshold = lenAddrs
+ }
+
+ if m.from == address.Undef {
+ return nil, xerrors.Errorf("must provide source address")
+ }
+
+ // Set up constructor parameters for multisig
+ msigParams := &multisig12.ConstructorParams{
+ Signers: signers,
+ NumApprovalsThreshold: threshold,
+ UnlockDuration: unlockDuration,
+ StartEpoch: unlockStart,
+ }
+
+ enc, actErr := actors.SerializeParams(msigParams)
+ if actErr != nil {
+ return nil, actErr
+ }
+
+ code, ok := actors.GetActorCodeID(actorstypes.Version12, manifest.MultisigKey)
+ if !ok {
+ return nil, xerrors.Errorf("failed to get multisig code ID")
+ }
+
+ // new actors are created by invoking 'exec' on the init actor with the constructor params
+ execParams := &init12.ExecParams{
+ CodeCID: code,
+ ConstructorParams: enc,
+ }
+
+ enc, actErr = actors.SerializeParams(execParams)
+ if actErr != nil {
+ return nil, actErr
+ }
+
+ return &types.Message{
+ To: init_.Address,
+ From: m.from,
+ Method: builtintypes.MethodsInit.Exec,
+ Params: enc,
+ Value: initialAmount,
+ }, nil
+}
diff --git a/chain/actors/builtin/multisig/message8.go b/chain/actors/builtin/multisig/message8.go
index 817d66726..390c94691 100644
--- a/chain/actors/builtin/multisig/message8.go
+++ b/chain/actors/builtin/multisig/message8.go
@@ -7,7 +7,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
builtintypes "github.com/filecoin-project/go-state-types/builtin"
- init11 "github.com/filecoin-project/go-state-types/builtin/v11/init"
+ init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
multisig8 "github.com/filecoin-project/go-state-types/builtin/v8/multisig"
"github.com/filecoin-project/go-state-types/manifest"
@@ -57,7 +57,7 @@ func (m message8) Create(
}
// new actors are created by invoking 'exec' on the init actor with the constructor params
- execParams := &init11.ExecParams{
+ execParams := &init12.ExecParams{
CodeCID: code,
ConstructorParams: enc,
}
diff --git a/chain/actors/builtin/multisig/message9.go b/chain/actors/builtin/multisig/message9.go
index 1472c4e66..907bec7d5 100644
--- a/chain/actors/builtin/multisig/message9.go
+++ b/chain/actors/builtin/multisig/message9.go
@@ -7,7 +7,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
builtintypes "github.com/filecoin-project/go-state-types/builtin"
- init11 "github.com/filecoin-project/go-state-types/builtin/v11/init"
+ init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
multisig9 "github.com/filecoin-project/go-state-types/builtin/v9/multisig"
"github.com/filecoin-project/go-state-types/manifest"
@@ -57,7 +57,7 @@ func (m message9) Create(
}
// new actors are created by invoking 'exec' on the init actor with the constructor params
- execParams := &init11.ExecParams{
+ execParams := &init12.ExecParams{
CodeCID: code,
ConstructorParams: enc,
}
diff --git a/chain/actors/builtin/multisig/multisig.go b/chain/actors/builtin/multisig/multisig.go
index 9ab8fffb5..71a3b7b22 100644
--- a/chain/actors/builtin/multisig/multisig.go
+++ b/chain/actors/builtin/multisig/multisig.go
@@ -12,7 +12,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
builtintypes "github.com/filecoin-project/go-state-types/builtin"
- msig11 "github.com/filecoin-project/go-state-types/builtin/v11/multisig"
+ msig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@@ -48,6 +48,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version11:
return load11(store, act.Head)
+ case actorstypes.Version12:
+ return load12(store, act.Head)
+
}
}
@@ -115,6 +118,9 @@ func MakeState(store adt.Store, av actorstypes.Version, signers []address.Addres
case actorstypes.Version11:
return make11(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
+ case actorstypes.Version12:
+ return make12(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
+
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@@ -141,7 +147,7 @@ type State interface {
GetState() interface{}
}
-type Transaction = msig11.Transaction
+type Transaction = msig12.Transaction
var Methods = builtintypes.MethodsMultisig
@@ -180,6 +186,9 @@ func Message(version actorstypes.Version, from address.Address) MessageBuilder {
case actorstypes.Version11:
return message11{message0{from}}
+
+ case actorstypes.Version12:
+ return message12{message0{from}}
default:
panic(fmt.Sprintf("unsupported actors version: %d", version))
}
@@ -203,13 +212,13 @@ type MessageBuilder interface {
}
// this type is the same between v0 and v2
-type ProposalHashData = msig11.ProposalHashData
-type ProposeReturn = msig11.ProposeReturn
-type ProposeParams = msig11.ProposeParams
-type ApproveReturn = msig11.ApproveReturn
+type ProposalHashData = msig12.ProposalHashData
+type ProposeReturn = msig12.ProposeReturn
+type ProposeParams = msig12.ProposeParams
+type ApproveReturn = msig12.ApproveReturn
func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
- params := msig11.TxnIDParams{ID: msig11.TxnID(id)}
+ params := msig12.TxnIDParams{ID: msig12.TxnID(id)}
if data != nil {
if data.Requester.Protocol() != address.ID {
return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester)
@@ -244,5 +253,6 @@ func AllCodes() []cid.Cid {
(&state9{}).Code(),
(&state10{}).Code(),
(&state11{}).Code(),
+ (&state12{}).Code(),
}
}
diff --git a/chain/actors/builtin/multisig/v12.go b/chain/actors/builtin/multisig/v12.go
new file mode 100644
index 000000000..d3d2f3809
--- /dev/null
+++ b/chain/actors/builtin/multisig/v12.go
@@ -0,0 +1,138 @@
+package multisig
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ actorstypes "github.com/filecoin-project/go-state-types/actors"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
+ msig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig"
+ adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
+ "github.com/filecoin-project/go-state-types/manifest"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+)
+
+var _ State = (*state12)(nil)
+
+func load12(store adt.Store, root cid.Cid) (State, error) {
+ out := state12{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make12(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
+ out := state12{store: store}
+ out.State = msig12.State{}
+ out.State.Signers = signers
+ out.State.NumApprovalsThreshold = threshold
+ out.State.StartEpoch = startEpoch
+ out.State.UnlockDuration = unlockDuration
+ out.State.InitialBalance = initialBalance
+
+ em, err := adt12.StoreEmptyMap(store, builtin12.DefaultHamtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State.PendingTxns = em
+
+ return &out, nil
+}
+
+type state12 struct {
+ msig12.State
+ store adt.Store
+}
+
+func (s *state12) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) {
+ return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil
+}
+
+func (s *state12) StartEpoch() (abi.ChainEpoch, error) {
+ return s.State.StartEpoch, nil
+}
+
+func (s *state12) UnlockDuration() (abi.ChainEpoch, error) {
+ return s.State.UnlockDuration, nil
+}
+
+func (s *state12) InitialBalance() (abi.TokenAmount, error) {
+ return s.State.InitialBalance, nil
+}
+
+func (s *state12) Threshold() (uint64, error) {
+ return s.State.NumApprovalsThreshold, nil
+}
+
+func (s *state12) Signers() ([]address.Address, error) {
+ return s.State.Signers, nil
+}
+
+func (s *state12) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error {
+ arr, err := adt12.AsMap(s.store, s.State.PendingTxns, builtin12.DefaultHamtBitwidth)
+ if err != nil {
+ return err
+ }
+ var out msig12.Transaction
+ return arr.ForEach(&out, func(key string) error {
+ txid, n := binary.Varint([]byte(key))
+ if n <= 0 {
+ return xerrors.Errorf("invalid pending transaction key: %v", key)
+ }
+ return cb(txid, (Transaction)(out)) //nolint:unconvert
+ })
+}
+
+func (s *state12) PendingTxnChanged(other State) (bool, error) {
+ other12, ok := other.(*state12)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.PendingTxns.Equals(other12.PendingTxns), nil
+}
+
+func (s *state12) transactions() (adt.Map, error) {
+ return adt12.AsMap(s.store, s.PendingTxns, builtin12.DefaultHamtBitwidth)
+}
+
+func (s *state12) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
+ var tx msig12.Transaction
+ if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return Transaction{}, err
+ }
+ return Transaction(tx), nil
+}
+
+func (s *state12) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state12) ActorKey() string {
+ return manifest.MultisigKey
+}
+
+func (s *state12) ActorVersion() actorstypes.Version {
+ return actorstypes.Version12
+}
+
+func (s *state12) Code() cid.Cid {
+ code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
+ if !ok {
+ panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
+ }
+
+ return code
+}
diff --git a/chain/actors/builtin/paych/message12.go b/chain/actors/builtin/paych/message12.go
new file mode 100644
index 000000000..bd821641a
--- /dev/null
+++ b/chain/actors/builtin/paych/message12.go
@@ -0,0 +1,109 @@
+package paych
+
+import (
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ actorstypes "github.com/filecoin-project/go-state-types/actors"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
+ init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
+ paych12 "github.com/filecoin-project/go-state-types/builtin/v12/paych"
+ paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type message12 struct{ from address.Address }
+
+func (m message12) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) {
+
+ actorCodeID, ok := actors.GetActorCodeID(actorstypes.Version12, "paymentchannel")
+ if !ok {
+ return nil, xerrors.Errorf("error getting actor paymentchannel code id for actor version %d", 12)
+ }
+
+ params, aerr := actors.SerializeParams(&paych12.ConstructorParams{From: m.from, To: to})
+ if aerr != nil {
+ return nil, aerr
+ }
+ enc, aerr := actors.SerializeParams(&init12.ExecParams{
+ CodeCID: actorCodeID,
+ ConstructorParams: params,
+ })
+ if aerr != nil {
+ return nil, aerr
+ }
+
+ return &types.Message{
+ To: init_.Address,
+ From: m.from,
+ Value: initialAmount,
+ Method: builtin12.MethodsInit.Exec,
+ Params: enc,
+ }, nil
+}
+
+func (m message12) Update(paych address.Address, sv *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) {
+ params, aerr := actors.SerializeParams(&paych12.UpdateChannelStateParams{
+
+ Sv: toV12SignedVoucher(*sv),
+
+ Secret: secret,
+ })
+ if aerr != nil {
+ return nil, aerr
+ }
+
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin12.MethodsPaych.UpdateChannelState,
+ Params: params,
+ }, nil
+}
+
+func toV12SignedVoucher(sv paychtypes.SignedVoucher) paych12.SignedVoucher {
+ merges := make([]paych12.Merge, len(sv.Merges))
+ for i := range sv.Merges {
+ merges[i] = paych12.Merge{
+ Lane: sv.Merges[i].Lane,
+ Nonce: sv.Merges[i].Nonce,
+ }
+ }
+
+ return paych12.SignedVoucher{
+ ChannelAddr: sv.ChannelAddr,
+ TimeLockMin: sv.TimeLockMin,
+ TimeLockMax: sv.TimeLockMax,
+ SecretHash: sv.SecretHash,
+ Extra: (*paych12.ModVerifyParams)(sv.Extra),
+ Lane: sv.Lane,
+ Nonce: sv.Nonce,
+ Amount: sv.Amount,
+ MinSettleHeight: sv.MinSettleHeight,
+ Merges: merges,
+ Signature: sv.Signature,
+ }
+}
+
+func (m message12) Settle(paych address.Address) (*types.Message, error) {
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin12.MethodsPaych.Settle,
+ }, nil
+}
+
+func (m message12) Collect(paych address.Address) (*types.Message, error) {
+ return &types.Message{
+ To: paych,
+ From: m.from,
+ Value: abi.NewTokenAmount(0),
+ Method: builtin12.MethodsPaych.Collect,
+ }, nil
+}
diff --git a/chain/actors/builtin/paych/paych.go b/chain/actors/builtin/paych/paych.go
index ccf48dbce..8a7979e95 100644
--- a/chain/actors/builtin/paych/paych.go
+++ b/chain/actors/builtin/paych/paych.go
@@ -50,6 +50,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version11:
return load11(store, act.Head)
+ case actorstypes.Version12:
+ return load12(store, act.Head)
+
}
}
@@ -167,6 +170,9 @@ func Message(version actorstypes.Version, from address.Address) MessageBuilder {
case actorstypes.Version11:
return message11{from}
+ case actorstypes.Version12:
+ return message12{from}
+
default:
panic(fmt.Sprintf("unsupported actors version: %d", version))
}
@@ -208,5 +214,6 @@ func AllCodes() []cid.Cid {
(&state9{}).Code(),
(&state10{}).Code(),
(&state11{}).Code(),
+ (&state12{}).Code(),
}
}
diff --git a/chain/actors/builtin/paych/v12.go b/chain/actors/builtin/paych/v12.go
new file mode 100644
index 000000000..5c1330d76
--- /dev/null
+++ b/chain/actors/builtin/paych/v12.go
@@ -0,0 +1,135 @@
+package paych
+
+import (
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ actorstypes "github.com/filecoin-project/go-state-types/actors"
+ "github.com/filecoin-project/go-state-types/big"
+ paych12 "github.com/filecoin-project/go-state-types/builtin/v12/paych"
+ adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
+ "github.com/filecoin-project/go-state-types/manifest"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+)
+
+var _ State = (*state12)(nil)
+
+func load12(store adt.Store, root cid.Cid) (State, error) {
+ out := state12{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make12(store adt.Store) (State, error) {
+ out := state12{store: store}
+ out.State = paych12.State{}
+ return &out, nil
+}
+
+type state12 struct {
+ paych12.State
+ store adt.Store
+ lsAmt *adt12.Array
+}
+
+// Channel owner, who has funded the actor
+func (s *state12) From() (address.Address, error) {
+ return s.State.From, nil
+}
+
+// Recipient of payouts from channel
+func (s *state12) To() (address.Address, error) {
+ return s.State.To, nil
+}
+
+// Height at which the channel can be `Collected`
+func (s *state12) SettlingAt() (abi.ChainEpoch, error) {
+ return s.State.SettlingAt, nil
+}
+
+// Amount successfully redeemed through the payment channel, paid out on `Collect()`
+func (s *state12) ToSend() (abi.TokenAmount, error) {
+ return s.State.ToSend, nil
+}
+
+func (s *state12) getOrLoadLsAmt() (*adt12.Array, error) {
+ if s.lsAmt != nil {
+ return s.lsAmt, nil
+ }
+
+ // Get the lane state from the chain
+ lsamt, err := adt12.AsArray(s.store, s.State.LaneStates, paych12.LaneStatesAmtBitwidth)
+ if err != nil {
+ return nil, err
+ }
+
+ s.lsAmt = lsamt
+ return lsamt, nil
+}
+
+// Get total number of lanes
+func (s *state12) LaneCount() (uint64, error) {
+ lsamt, err := s.getOrLoadLsAmt()
+ if err != nil {
+ return 0, err
+ }
+ return lsamt.Length(), nil
+}
+
+func (s *state12) GetState() interface{} {
+ return &s.State
+}
+
+// Iterate lane states
+func (s *state12) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
+ // Get the lane state from the chain
+ lsamt, err := s.getOrLoadLsAmt()
+ if err != nil {
+ return err
+ }
+
+ // Note: we use a map instead of an array to store laneStates because the
+ // client sets the lane ID (the index) and potentially they could use a
+ // very large index.
+ var ls paych12.LaneState
+ return lsamt.ForEach(&ls, func(i int64) error {
+ return cb(uint64(i), &laneState12{ls})
+ })
+}
+
+type laneState12 struct {
+ paych12.LaneState
+}
+
+func (ls *laneState12) Redeemed() (big.Int, error) {
+ return ls.LaneState.Redeemed, nil
+}
+
+func (ls *laneState12) Nonce() (uint64, error) {
+ return ls.LaneState.Nonce, nil
+}
+
+func (s *state12) ActorKey() string {
+ return manifest.PaychKey
+}
+
+func (s *state12) ActorVersion() actorstypes.Version {
+ return actorstypes.Version12
+}
+
+func (s *state12) Code() cid.Cid {
+ code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
+ if !ok {
+ panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
+ }
+
+ return code
+}
diff --git a/chain/actors/builtin/power/power.go b/chain/actors/builtin/power/power.go
index f3bcef5bb..9b64ded38 100644
--- a/chain/actors/builtin/power/power.go
+++ b/chain/actors/builtin/power/power.go
@@ -9,7 +9,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/big"
- builtin11 "github.com/filecoin-project/go-state-types/builtin"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@@ -27,8 +27,8 @@ import (
)
var (
- Address = builtin11.StoragePowerActorAddr
- Methods = builtin11.MethodsPower
+ Address = builtin12.StoragePowerActorAddr
+ Methods = builtin12.MethodsPower
)
func Load(store adt.Store, act *types.Actor) (State, error) {
@@ -51,6 +51,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version11:
return load11(store, act.Head)
+ case actorstypes.Version12:
+ return load12(store, act.Head)
+
}
}
@@ -118,6 +121,9 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) {
case actorstypes.Version11:
return make11(store)
+ case actorstypes.Version12:
+ return make12(store)
+
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@@ -183,5 +189,6 @@ func AllCodes() []cid.Cid {
(&state9{}).Code(),
(&state10{}).Code(),
(&state11{}).Code(),
+ (&state12{}).Code(),
}
}
diff --git a/chain/actors/builtin/power/v12.go b/chain/actors/builtin/power/v12.go
new file mode 100644
index 000000000..2e9109022
--- /dev/null
+++ b/chain/actors/builtin/power/v12.go
@@ -0,0 +1,207 @@
+package power
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ actorstypes "github.com/filecoin-project/go-state-types/actors"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
+ power12 "github.com/filecoin-project/go-state-types/builtin/v12/power"
+ adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
+ "github.com/filecoin-project/go-state-types/manifest"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+)
+
+var _ State = (*state12)(nil)
+
+func load12(store adt.Store, root cid.Cid) (State, error) {
+ out := state12{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make12(store adt.Store) (State, error) {
+ out := state12{store: store}
+
+ s, err := power12.ConstructState(store)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state12 struct {
+ power12.State
+ store adt.Store
+}
+
+func (s *state12) TotalLocked() (abi.TokenAmount, error) {
+ return s.TotalPledgeCollateral, nil
+}
+
+func (s *state12) TotalPower() (Claim, error) {
+ return Claim{
+ RawBytePower: s.TotalRawBytePower,
+ QualityAdjPower: s.TotalQualityAdjPower,
+ }, nil
+}
+
+// Committed power to the network. Includes miners below the minimum threshold.
+func (s *state12) TotalCommitted() (Claim, error) {
+ return Claim{
+ RawBytePower: s.TotalBytesCommitted,
+ QualityAdjPower: s.TotalQABytesCommitted,
+ }, nil
+}
+
+func (s *state12) MinerPower(addr address.Address) (Claim, bool, error) {
+ claims, err := s.claims()
+ if err != nil {
+ return Claim{}, false, err
+ }
+ var claim power12.Claim
+ ok, err := claims.Get(abi.AddrKey(addr), &claim)
+ if err != nil {
+ return Claim{}, false, err
+ }
+ return Claim{
+ RawBytePower: claim.RawBytePower,
+ QualityAdjPower: claim.QualityAdjPower,
+ }, ok, nil
+}
+
+func (s *state12) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) {
+ return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a)
+}
+
+func (s *state12) TotalPowerSmoothed() (builtin.FilterEstimate, error) {
+ return builtin.FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil
+}
+
+func (s *state12) MinerCounts() (uint64, uint64, error) {
+ return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil
+}
+
+func (s *state12) ListAllMiners() ([]address.Address, error) {
+ claims, err := s.claims()
+ if err != nil {
+ return nil, err
+ }
+
+ var miners []address.Address
+ err = claims.ForEach(nil, func(k string) error {
+ a, err := address.NewFromBytes([]byte(k))
+ if err != nil {
+ return err
+ }
+ miners = append(miners, a)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return miners, nil
+}
+
+func (s *state12) ForEachClaim(cb func(miner address.Address, claim Claim) error) error {
+ claims, err := s.claims()
+ if err != nil {
+ return err
+ }
+
+ var claim power12.Claim
+ return claims.ForEach(&claim, func(k string) error {
+ a, err := address.NewFromBytes([]byte(k))
+ if err != nil {
+ return err
+ }
+ return cb(a, Claim{
+ RawBytePower: claim.RawBytePower,
+ QualityAdjPower: claim.QualityAdjPower,
+ })
+ })
+}
+
+func (s *state12) ClaimsChanged(other State) (bool, error) {
+ other12, ok := other.(*state12)
+ if !ok {
+ // treat an upgrade as a change, always
+ return true, nil
+ }
+ return !s.State.Claims.Equals(other12.State.Claims), nil
+}
+
+func (s *state12) SetTotalQualityAdjPower(p abi.StoragePower) error {
+ s.State.TotalQualityAdjPower = p
+ return nil
+}
+
+func (s *state12) SetTotalRawBytePower(p abi.StoragePower) error {
+ s.State.TotalRawBytePower = p
+ return nil
+}
+
+func (s *state12) SetThisEpochQualityAdjPower(p abi.StoragePower) error {
+ s.State.ThisEpochQualityAdjPower = p
+ return nil
+}
+
+func (s *state12) SetThisEpochRawBytePower(p abi.StoragePower) error {
+ s.State.ThisEpochRawBytePower = p
+ return nil
+}
+
+func (s *state12) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state12) claims() (adt.Map, error) {
+ return adt12.AsMap(s.store, s.Claims, builtin12.DefaultHamtBitwidth)
+}
+
+func (s *state12) decodeClaim(val *cbg.Deferred) (Claim, error) {
+ var ci power12.Claim
+ if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
+ return Claim{}, err
+ }
+ return fromV12Claim(ci), nil
+}
+
+func fromV12Claim(v12 power12.Claim) Claim {
+ return Claim{
+ RawBytePower: v12.RawBytePower,
+ QualityAdjPower: v12.QualityAdjPower,
+ }
+}
+
+func (s *state12) ActorKey() string {
+ return manifest.PowerKey
+}
+
+func (s *state12) ActorVersion() actorstypes.Version {
+ return actorstypes.Version12
+}
+
+func (s *state12) Code() cid.Cid {
+ code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
+ if !ok {
+ panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
+ }
+
+ return code
+}
diff --git a/chain/actors/builtin/registry.go b/chain/actors/builtin/registry.go
index 4addbd451..6ba5fef03 100644
--- a/chain/actors/builtin/registry.go
+++ b/chain/actors/builtin/registry.go
@@ -42,6 +42,22 @@ import (
reward11 "github.com/filecoin-project/go-state-types/builtin/v11/reward"
system11 "github.com/filecoin-project/go-state-types/builtin/v11/system"
verifreg11 "github.com/filecoin-project/go-state-types/builtin/v11/verifreg"
+ account12 "github.com/filecoin-project/go-state-types/builtin/v12/account"
+ cron12 "github.com/filecoin-project/go-state-types/builtin/v12/cron"
+ datacap12 "github.com/filecoin-project/go-state-types/builtin/v12/datacap"
+ eam12 "github.com/filecoin-project/go-state-types/builtin/v12/eam"
+ ethaccount12 "github.com/filecoin-project/go-state-types/builtin/v12/ethaccount"
+ evm12 "github.com/filecoin-project/go-state-types/builtin/v12/evm"
+ _init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
+ market12 "github.com/filecoin-project/go-state-types/builtin/v12/market"
+ miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner"
+ multisig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig"
+ paych12 "github.com/filecoin-project/go-state-types/builtin/v12/paych"
+ placeholder12 "github.com/filecoin-project/go-state-types/builtin/v12/placeholder"
+ power12 "github.com/filecoin-project/go-state-types/builtin/v12/power"
+ reward12 "github.com/filecoin-project/go-state-types/builtin/v12/reward"
+ system12 "github.com/filecoin-project/go-state-types/builtin/v12/system"
+ verifreg12 "github.com/filecoin-project/go-state-types/builtin/v12/verifreg"
account8 "github.com/filecoin-project/go-state-types/builtin/v8/account"
cron8 "github.com/filecoin-project/go-state-types/builtin/v8/cron"
_init8 "github.com/filecoin-project/go-state-types/builtin/v8/init"
@@ -497,6 +513,110 @@ func MakeRegistry(av actorstypes.Version) []RegistryEntry {
}
}
+ case actorstypes.Version12:
+ for key, codeID := range codeIDs {
+ switch key {
+ case manifest.AccountKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: account12.Methods,
+ state: new(account12.State),
+ })
+ case manifest.CronKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: cron12.Methods,
+ state: new(cron12.State),
+ })
+ case manifest.InitKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: _init12.Methods,
+ state: new(_init12.State),
+ })
+ case manifest.MarketKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: market12.Methods,
+ state: new(market12.State),
+ })
+ case manifest.MinerKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: miner12.Methods,
+ state: new(miner12.State),
+ })
+ case manifest.MultisigKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: multisig12.Methods,
+ state: new(multisig12.State),
+ })
+ case manifest.PaychKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: paych12.Methods,
+ state: new(paych12.State),
+ })
+ case manifest.PowerKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: power12.Methods,
+ state: new(power12.State),
+ })
+ case manifest.RewardKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: reward12.Methods,
+ state: new(reward12.State),
+ })
+ case manifest.SystemKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: system12.Methods,
+ state: new(system12.State),
+ })
+ case manifest.VerifregKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: verifreg12.Methods,
+ state: new(verifreg12.State),
+ })
+ case manifest.DatacapKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: datacap12.Methods,
+ state: new(datacap12.State),
+ })
+
+ case manifest.EvmKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: evm12.Methods,
+ state: new(evm12.State),
+ })
+ case manifest.EamKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: eam12.Methods,
+ state: nil,
+ })
+ case manifest.PlaceholderKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: placeholder12.Methods,
+ state: nil,
+ })
+ case manifest.EthAccountKey:
+ registry = append(registry, RegistryEntry{
+ code: codeID,
+ methods: ethaccount12.Methods,
+ state: nil,
+ })
+
+ }
+ }
+
default:
panic("expected version v8 and up only, use specs-actors for v0-7")
}
diff --git a/chain/actors/builtin/reward/reward.go b/chain/actors/builtin/reward/reward.go
index b0060a217..3c6463645 100644
--- a/chain/actors/builtin/reward/reward.go
+++ b/chain/actors/builtin/reward/reward.go
@@ -6,7 +6,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
- builtin11 "github.com/filecoin-project/go-state-types/builtin"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@@ -25,8 +25,8 @@ import (
)
var (
- Address = builtin11.RewardActorAddr
- Methods = builtin11.MethodsReward
+ Address = builtin12.RewardActorAddr
+ Methods = builtin12.MethodsReward
)
func Load(store adt.Store, act *types.Actor) (State, error) {
@@ -49,6 +49,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version11:
return load11(store, act.Head)
+ case actorstypes.Version12:
+ return load12(store, act.Head)
+
}
}
@@ -116,6 +119,9 @@ func MakeState(store adt.Store, av actorstypes.Version, currRealizedPower abi.St
case actorstypes.Version11:
return make11(store, currRealizedPower)
+ case actorstypes.Version12:
+ return make12(store, currRealizedPower)
+
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@@ -159,5 +165,6 @@ func AllCodes() []cid.Cid {
(&state9{}).Code(),
(&state10{}).Code(),
(&state11{}).Code(),
+ (&state12{}).Code(),
}
}
diff --git a/chain/actors/builtin/reward/v12.go b/chain/actors/builtin/reward/v12.go
new file mode 100644
index 000000000..ecc8ff5a0
--- /dev/null
+++ b/chain/actors/builtin/reward/v12.go
@@ -0,0 +1,120 @@
+package reward
+
+import (
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ actorstypes "github.com/filecoin-project/go-state-types/actors"
+ miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner"
+ reward12 "github.com/filecoin-project/go-state-types/builtin/v12/reward"
+ smoothing12 "github.com/filecoin-project/go-state-types/builtin/v12/util/smoothing"
+ "github.com/filecoin-project/go-state-types/manifest"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+ "github.com/filecoin-project/lotus/chain/actors/builtin"
+)
+
+var _ State = (*state12)(nil)
+
+func load12(store adt.Store, root cid.Cid) (State, error) {
+ out := state12{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make12(store adt.Store, currRealizedPower abi.StoragePower) (State, error) {
+ out := state12{store: store}
+ out.State = *reward12.ConstructState(currRealizedPower)
+ return &out, nil
+}
+
+type state12 struct {
+ reward12.State
+ store adt.Store
+}
+
+func (s *state12) ThisEpochReward() (abi.TokenAmount, error) {
+ return s.State.ThisEpochReward, nil
+}
+
+func (s *state12) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
+
+ return builtin.FilterEstimate{
+ PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate,
+ VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate,
+ }, nil
+
+}
+
+func (s *state12) ThisEpochBaselinePower() (abi.StoragePower, error) {
+ return s.State.ThisEpochBaselinePower, nil
+}
+
+func (s *state12) TotalStoragePowerReward() (abi.TokenAmount, error) {
+ return s.State.TotalStoragePowerReward, nil
+}
+
+func (s *state12) EffectiveBaselinePower() (abi.StoragePower, error) {
+ return s.State.EffectiveBaselinePower, nil
+}
+
+func (s *state12) EffectiveNetworkTime() (abi.ChainEpoch, error) {
+ return s.State.EffectiveNetworkTime, nil
+}
+
+func (s *state12) CumsumBaseline() (reward12.Spacetime, error) {
+ return s.State.CumsumBaseline, nil
+}
+
+func (s *state12) CumsumRealized() (reward12.Spacetime, error) {
+ return s.State.CumsumRealized, nil
+}
+
+func (s *state12) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
+ return miner12.InitialPledgeForPower(
+ qaPower,
+ s.State.ThisEpochBaselinePower,
+ s.State.ThisEpochRewardSmoothed,
+ smoothing12.FilterEstimate{
+ PositionEstimate: networkQAPower.PositionEstimate,
+ VelocityEstimate: networkQAPower.VelocityEstimate,
+ },
+ circSupply,
+ ), nil
+}
+
+func (s *state12) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) {
+ return miner12.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed,
+ smoothing12.FilterEstimate{
+ PositionEstimate: networkQAPower.PositionEstimate,
+ VelocityEstimate: networkQAPower.VelocityEstimate,
+ },
+ sectorWeight), nil
+}
+
+func (s *state12) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state12) ActorKey() string {
+ return manifest.RewardKey
+}
+
+func (s *state12) ActorVersion() actorstypes.Version {
+ return actorstypes.Version12
+}
+
+func (s *state12) Code() cid.Cid {
+ code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
+ if !ok {
+ panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
+ }
+
+ return code
+}
diff --git a/chain/actors/builtin/system/system.go b/chain/actors/builtin/system/system.go
index 4db8db610..2a2b703bb 100644
--- a/chain/actors/builtin/system/system.go
+++ b/chain/actors/builtin/system/system.go
@@ -5,7 +5,7 @@ import (
"golang.org/x/xerrors"
actorstypes "github.com/filecoin-project/go-state-types/actors"
- builtin11 "github.com/filecoin-project/go-state-types/builtin"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
@@ -21,7 +21,7 @@ import (
)
var (
- Address = builtin11.SystemActorAddr
+ Address = builtin12.SystemActorAddr
)
func Load(store adt.Store, act *types.Actor) (State, error) {
@@ -44,6 +44,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version11:
return load11(store, act.Head)
+ case actorstypes.Version12:
+ return load12(store, act.Head)
+
}
}
@@ -111,6 +114,9 @@ func MakeState(store adt.Store, av actorstypes.Version, builtinActors cid.Cid) (
case actorstypes.Version11:
return make11(store, builtinActors)
+ case actorstypes.Version12:
+ return make12(store, builtinActors)
+
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@@ -138,5 +144,6 @@ func AllCodes() []cid.Cid {
(&state9{}).Code(),
(&state10{}).Code(),
(&state11{}).Code(),
+ (&state12{}).Code(),
}
}
diff --git a/chain/actors/builtin/system/v12.go b/chain/actors/builtin/system/v12.go
new file mode 100644
index 000000000..71938e799
--- /dev/null
+++ b/chain/actors/builtin/system/v12.go
@@ -0,0 +1,72 @@
+package system
+
+import (
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+
+ actorstypes "github.com/filecoin-project/go-state-types/actors"
+ system12 "github.com/filecoin-project/go-state-types/builtin/v12/system"
+ "github.com/filecoin-project/go-state-types/manifest"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+)
+
+var _ State = (*state12)(nil)
+
+func load12(store adt.Store, root cid.Cid) (State, error) {
+ out := state12{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make12(store adt.Store, builtinActors cid.Cid) (State, error) {
+ out := state12{store: store}
+ out.State = system12.State{
+ BuiltinActors: builtinActors,
+ }
+ return &out, nil
+}
+
+type state12 struct {
+ system12.State
+ store adt.Store
+}
+
+func (s *state12) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state12) GetBuiltinActors() cid.Cid {
+
+ return s.State.BuiltinActors
+
+}
+
+func (s *state12) SetBuiltinActors(c cid.Cid) error {
+
+ s.State.BuiltinActors = c
+ return nil
+
+}
+
+func (s *state12) ActorKey() string {
+ return manifest.SystemKey
+}
+
+func (s *state12) ActorVersion() actorstypes.Version {
+ return actorstypes.Version12
+}
+
+func (s *state12) Code() cid.Cid {
+ code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
+ if !ok {
+ panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
+ }
+
+ return code
+}
diff --git a/chain/actors/builtin/verifreg/v12.go b/chain/actors/builtin/verifreg/v12.go
new file mode 100644
index 000000000..77a113fbe
--- /dev/null
+++ b/chain/actors/builtin/verifreg/v12.go
@@ -0,0 +1,170 @@
+package verifreg
+
+import (
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ actorstypes "github.com/filecoin-project/go-state-types/actors"
+ "github.com/filecoin-project/go-state-types/big"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
+ adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
+ verifreg12 "github.com/filecoin-project/go-state-types/builtin/v12/verifreg"
+ verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
+ "github.com/filecoin-project/go-state-types/manifest"
+
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/adt"
+)
+
+var _ State = (*state12)(nil)
+
+func load12(store adt.Store, root cid.Cid) (State, error) {
+ out := state12{store: store}
+ err := store.Get(store.Context(), root, &out)
+ if err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+func make12(store adt.Store, rootKeyAddress address.Address) (State, error) {
+ out := state12{store: store}
+
+ s, err := verifreg12.ConstructState(store, rootKeyAddress)
+ if err != nil {
+ return nil, err
+ }
+
+ out.State = *s
+
+ return &out, nil
+}
+
+type state12 struct {
+ verifreg12.State
+ store adt.Store
+}
+
+func (s *state12) RootKey() (address.Address, error) {
+ return s.State.RootKey, nil
+}
+
+func (s *state12) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
+
+ return false, big.Zero(), xerrors.Errorf("unsupported in actors v12")
+
+}
+
+func (s *state12) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
+ return getDataCap(s.store, actors.Version12, s.verifiers, addr)
+}
+
+func (s *state12) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
+ return getRemoveDataCapProposalID(s.store, actors.Version12, s.removeDataCapProposalIDs, verifier, client)
+}
+
+func (s *state12) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
+ return forEachCap(s.store, actors.Version12, s.verifiers, cb)
+}
+
+func (s *state12) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
+
+ return xerrors.Errorf("unsupported in actors v12")
+
+}
+
+func (s *state12) verifiedClients() (adt.Map, error) {
+
+ return nil, xerrors.Errorf("unsupported in actors v12")
+
+}
+
+func (s *state12) verifiers() (adt.Map, error) {
+ return adt12.AsMap(s.store, s.Verifiers, builtin12.DefaultHamtBitwidth)
+}
+
+func (s *state12) removeDataCapProposalIDs() (adt.Map, error) {
+ return adt12.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin12.DefaultHamtBitwidth)
+}
+
+func (s *state12) GetState() interface{} {
+ return &s.State
+}
+
+func (s *state12) GetAllocation(clientIdAddr address.Address, allocationId verifreg9.AllocationId) (*Allocation, bool, error) {
+
+ alloc, ok, err := s.FindAllocation(s.store, clientIdAddr, verifreg12.AllocationId(allocationId))
+ return (*Allocation)(alloc), ok, err
+}
+
+func (s *state12) GetAllocations(clientIdAddr address.Address) (map[AllocationId]Allocation, error) {
+
+ v12Map, err := s.LoadAllocationsToMap(s.store, clientIdAddr)
+
+ retMap := make(map[AllocationId]Allocation, len(v12Map))
+ for k, v := range v12Map {
+ retMap[AllocationId(k)] = Allocation(v)
+ }
+
+ return retMap, err
+
+}
+
+func (s *state12) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) {
+
+ claim, ok, err := s.FindClaim(s.store, providerIdAddr, verifreg12.ClaimId(claimId))
+ return (*Claim)(claim), ok, err
+
+}
+
+func (s *state12) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, error) {
+
+ v12Map, err := s.LoadClaimsToMap(s.store, providerIdAddr)
+
+ retMap := make(map[ClaimId]Claim, len(v12Map))
+ for k, v := range v12Map {
+ retMap[ClaimId(k)] = Claim(v)
+ }
+
+ return retMap, err
+
+}
+
+func (s *state12) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) {
+
+ v12Map, err := s.LoadClaimsToMap(s.store, providerIdAddr)
+
+ retMap := make(map[abi.SectorNumber][]ClaimId)
+ for k, v := range v12Map {
+ claims, ok := retMap[v.Sector]
+ if !ok {
+ retMap[v.Sector] = []ClaimId{ClaimId(k)}
+ } else {
+ retMap[v.Sector] = append(claims, ClaimId(k))
+ }
+ }
+
+ return retMap, err
+
+}
+
+func (s *state12) ActorKey() string {
+ return manifest.VerifregKey
+}
+
+func (s *state12) ActorVersion() actorstypes.Version {
+ return actorstypes.Version12
+}
+
+func (s *state12) Code() cid.Cid {
+ code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
+ if !ok {
+ panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
+ }
+
+ return code
+}
diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go
index eb911ea46..e79a790c7 100644
--- a/chain/actors/builtin/verifreg/verifreg.go
+++ b/chain/actors/builtin/verifreg/verifreg.go
@@ -7,7 +7,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
- builtin11 "github.com/filecoin-project/go-state-types/builtin"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest"
@@ -25,8 +25,8 @@ import (
)
var (
- Address = builtin11.VerifiedRegistryActorAddr
- Methods = builtin11.MethodsVerifiedRegistry
+ Address = builtin12.VerifiedRegistryActorAddr
+ Methods = builtin12.MethodsVerifiedRegistry
)
func Load(store adt.Store, act *types.Actor) (State, error) {
@@ -49,6 +49,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version11:
return load11(store, act.Head)
+ case actorstypes.Version12:
+ return load12(store, act.Head)
+
}
}
@@ -116,6 +119,9 @@ func MakeState(store adt.Store, av actorstypes.Version, rootKeyAddress address.A
case actorstypes.Version11:
return make11(store, rootKeyAddress)
+ case actorstypes.Version12:
+ return make12(store, rootKeyAddress)
+
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@@ -154,6 +160,7 @@ func AllCodes() []cid.Cid {
(&state9{}).Code(),
(&state10{}).Code(),
(&state11{}).Code(),
+ (&state12{}).Code(),
}
}
diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go
index bf982af89..d680def37 100644
--- a/chain/actors/policy/policy.go
+++ b/chain/actors/policy/policy.go
@@ -8,6 +8,7 @@ import (
"github.com/filecoin-project/go-state-types/big"
builtin10 "github.com/filecoin-project/go-state-types/builtin"
builtin11 "github.com/filecoin-project/go-state-types/builtin"
+ builtin12 "github.com/filecoin-project/go-state-types/builtin"
builtin8 "github.com/filecoin-project/go-state-types/builtin"
builtin9 "github.com/filecoin-project/go-state-types/builtin"
market10 "github.com/filecoin-project/go-state-types/builtin/v10/market"
@@ -15,8 +16,11 @@ import (
verifreg10 "github.com/filecoin-project/go-state-types/builtin/v10/verifreg"
market11 "github.com/filecoin-project/go-state-types/builtin/v11/market"
miner11 "github.com/filecoin-project/go-state-types/builtin/v11/miner"
- paych11 "github.com/filecoin-project/go-state-types/builtin/v11/paych"
verifreg11 "github.com/filecoin-project/go-state-types/builtin/v11/verifreg"
+ market12 "github.com/filecoin-project/go-state-types/builtin/v12/market"
+ miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner"
+ paych12 "github.com/filecoin-project/go-state-types/builtin/v12/paych"
+ verifreg12 "github.com/filecoin-project/go-state-types/builtin/v12/verifreg"
market8 "github.com/filecoin-project/go-state-types/builtin/v8/market"
miner8 "github.com/filecoin-project/go-state-types/builtin/v8/miner"
verifreg8 "github.com/filecoin-project/go-state-types/builtin/v8/verifreg"
@@ -55,14 +59,14 @@ import (
)
const (
- ChainFinality = miner11.ChainFinality
+ ChainFinality = miner12.ChainFinality
SealRandomnessLookback = ChainFinality
- PaychSettleDelay = paych11.SettleDelay
- MaxPreCommitRandomnessLookback = builtin11.EpochsInDay + SealRandomnessLookback
+ PaychSettleDelay = paych12.SettleDelay
+ MaxPreCommitRandomnessLookback = builtin12.EpochsInDay + SealRandomnessLookback
)
var (
- MarketDefaultAllocationTermBuffer = market11.MarketDefaultAllocationTermBuffer
+ MarketDefaultAllocationTermBuffer = market12.MarketDefaultAllocationTermBuffer
)
// SetSupportedProofTypes sets supported proof types, across all actor versions.
@@ -175,11 +179,13 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) {
miner11.PreCommitChallengeDelay = delay
+ miner12.PreCommitChallengeDelay = delay
+
}
// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay.
func GetPreCommitChallengeDelay() abi.ChainEpoch {
- return miner11.PreCommitChallengeDelay
+ return miner12.PreCommitChallengeDelay
}
// SetConsensusMinerMinPower sets the minimum power of an individual miner must
@@ -229,6 +235,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) {
policy.ConsensusMinerMinPower = p
}
+ for _, policy := range builtin12.PoStProofPolicies {
+ policy.ConsensusMinerMinPower = p
+ }
+
}
// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should
@@ -257,6 +267,8 @@ func SetMinVerifiedDealSize(size abi.StoragePower) {
verifreg11.MinVerifiedDealSize = size
+ verifreg12.MinVerifiedDealSize = size
+
}
func GetMaxProveCommitDuration(ver actorstypes.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) {
@@ -306,6 +318,10 @@ func GetMaxProveCommitDuration(ver actorstypes.Version, t abi.RegisteredSealProo
return miner11.MaxProveCommitDuration[t], nil
+ case actorstypes.Version12:
+
+ return miner12.MaxProveCommitDuration[t], nil
+
default:
return 0, xerrors.Errorf("unsupported actors version")
}
@@ -366,6 +382,11 @@ func SetProviderCollateralSupplyTarget(num, denom big.Int) {
Denominator: denom,
}
+ market12.ProviderCollateralSupplyTarget = builtin12.BigFrac{
+ Numerator: num,
+ Denominator: denom,
+ }
+
}
func DealProviderCollateralBounds(
@@ -434,13 +455,18 @@ func DealProviderCollateralBounds(
min, max := market11.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
+ case actorstypes.Version12:
+
+ min, max := market12.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
+ return min, max, nil
+
default:
return big.Zero(), big.Zero(), xerrors.Errorf("unsupported actors version")
}
}
func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) {
- return market11.DealDurationBounds(pieceSize)
+ return market12.DealDurationBounds(pieceSize)
}
// Sets the challenge window and scales the proving period to match (such that
@@ -516,6 +542,13 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) {
// scale it if we're scaling the challenge period.
miner11.WPoStDisputeWindow = period * 30
+ miner12.WPoStChallengeWindow = period
+ miner12.WPoStProvingPeriod = period * abi.ChainEpoch(miner12.WPoStPeriodDeadlines)
+
+ // by default, this is 2x finality which is 30 periods.
+ // scale it if we're scaling the challenge period.
+ miner12.WPoStDisputeWindow = period * 30
+
}
func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
@@ -528,15 +561,15 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
}
func GetMaxSectorExpirationExtension() abi.ChainEpoch {
- return miner11.MaxSectorExpirationExtension
+ return miner12.MaxSectorExpirationExtension
}
func GetMinSectorExpiration() abi.ChainEpoch {
- return miner11.MinSectorExpiration
+ return miner12.MinSectorExpiration
}
func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) {
- sectorsPerPart, err := builtin11.PoStProofWindowPoStPartitionSectors(p)
+ sectorsPerPart, err := builtin12.PoStProofWindowPoStPartitionSectors(p)
if err != nil {
return 0, err
}
@@ -556,7 +589,7 @@ func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version)
return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime
}
- return builtin11.SealProofPoliciesV11[proof].SectorMaxLifetime
+ return builtin12.SealProofPoliciesV11[proof].SectorMaxLifetime
}
func GetAddressedSectorsMax(nwVer network.Version) (int, error) {
@@ -599,6 +632,9 @@ func GetAddressedSectorsMax(nwVer network.Version) (int, error) {
case actorstypes.Version11:
return miner11.AddressedSectorsMax, nil
+ case actorstypes.Version12:
+ return miner12.AddressedSectorsMax, nil
+
default:
return 0, xerrors.Errorf("unsupported network version")
}
@@ -656,6 +692,10 @@ func GetDeclarationsMax(nwVer network.Version) (int, error) {
return miner11.DeclarationsMax, nil
+ case actorstypes.Version12:
+
+ return miner12.DeclarationsMax, nil
+
default:
return 0, xerrors.Errorf("unsupported network version")
}
@@ -712,6 +752,10 @@ func AggregateProveCommitNetworkFee(nwVer network.Version, aggregateSize int, ba
return miner11.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil
+ case actorstypes.Version12:
+
+ return miner12.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil
+
default:
return big.Zero(), xerrors.Errorf("unsupported network version")
}
@@ -768,6 +812,10 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base
return miner11.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
+ case actorstypes.Version12:
+
+ return miner12.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
+
default:
return big.Zero(), xerrors.Errorf("unsupported network version")
}
diff --git a/chain/actors/version.go b/chain/actors/version.go
index 3a5b935bf..92c0da006 100644
--- a/chain/actors/version.go
+++ b/chain/actors/version.go
@@ -14,9 +14,9 @@ const ({{range .actorVersions}}
/* inline-gen start */
-var LatestVersion = 11
+var LatestVersion = 12
-var Versions = []int{0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
+var Versions = []int{0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}
const (
Version0 Version = 0
@@ -30,6 +30,7 @@ const (
Version9 Version = 9
Version10 Version = 10
Version11 Version = 11
+ Version12 Version = 12
)
/* inline-gen end */
diff --git a/chain/badtscache.go b/chain/badtscache.go
index 0f215dcdc..162e6b7a7 100644
--- a/chain/badtscache.go
+++ b/chain/badtscache.go
@@ -3,14 +3,14 @@ package chain
import (
"fmt"
- lru "github.com/hashicorp/golang-lru/v2"
+ "github.com/hashicorp/golang-lru/arc/v2"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/build"
)
type BadBlockCache struct {
- badBlocks *lru.ARCCache[cid.Cid, BadBlockReason]
+ badBlocks *arc.ARCCache[cid.Cid, BadBlockReason]
}
type BadBlockReason struct {
@@ -43,7 +43,7 @@ func (bbr BadBlockReason) String() string {
}
func NewBadBlockCache() *BadBlockCache {
- cache, err := lru.NewARC[cid.Cid, BadBlockReason](build.BadBlockCacheSize)
+ cache, err := arc.NewARC[cid.Cid, BadBlockReason](build.BadBlockCacheSize)
if err != nil {
panic(err) // ok
}
diff --git a/chain/beacon/drand/drand.go b/chain/beacon/drand/drand.go
index 9b62a7928..5825fa691 100644
--- a/chain/beacon/drand/drand.go
+++ b/chain/beacon/drand/drand.go
@@ -29,19 +29,6 @@ import (
var log = logging.Logger("drand")
-type drandPeer struct {
- addr string
- tls bool
-}
-
-func (dp *drandPeer) Address() string {
- return dp.addr
-}
-
-func (dp *drandPeer) IsTLS() bool {
- return dp.tls
-}
-
// DrandBeacon connects Lotus with a drand network in order to provide
// randomness to the system in a way that's aligned with Filecoin rounds/epochs.
//
@@ -235,3 +222,16 @@ func (db *DrandBeacon) maxBeaconRoundV2(latestTs uint64) uint64 {
}
var _ beacon.RandomBeacon = (*DrandBeacon)(nil)
+
+func BeaconScheduleFromDrandSchedule(dcs dtypes.DrandSchedule, genesisTime uint64, ps *pubsub.PubSub) (beacon.Schedule, error) {
+ shd := beacon.Schedule{}
+ for _, dc := range dcs {
+ bc, err := NewDrandBeacon(genesisTime, build.BlockDelaySecs, ps, dc.Config)
+ if err != nil {
+ return nil, xerrors.Errorf("creating drand beacon: %w", err)
+ }
+ shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
+ }
+
+ return shd, nil
+}
diff --git a/chain/consensus/compute_state.go b/chain/consensus/compute_state.go
index 6b08519af..1edeb60b7 100644
--- a/chain/consensus/compute_state.go
+++ b/chain/consensus/compute_state.go
@@ -52,6 +52,7 @@ func NewActorRegistry() *vm.ActorRegistry {
inv.Register(actorstypes.Version9, vm.ActorsVersionPredicate(actorstypes.Version9), builtin.MakeRegistry(actorstypes.Version9))
inv.Register(actorstypes.Version10, vm.ActorsVersionPredicate(actorstypes.Version10), builtin.MakeRegistry(actorstypes.Version10))
inv.Register(actorstypes.Version11, vm.ActorsVersionPredicate(actorstypes.Version11), builtin.MakeRegistry(actorstypes.Version11))
+ inv.Register(actorstypes.Version12, vm.ActorsVersionPredicate(actorstypes.Version12), builtin.MakeRegistry(actorstypes.Version12))
return inv
}
@@ -80,7 +81,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
pstate cid.Cid,
bms []FilecoinBlockMessages,
epoch abi.ChainEpoch,
- r vm.Rand,
+ r rand.Rand,
em stmgr.ExecMonitor,
vmTracing bool,
baseFee abi.TokenAmount,
@@ -135,6 +136,10 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
return xerrors.Errorf("running cron: %w", err)
}
+ if !ret.ExitCode.IsSuccess() {
+ return xerrors.Errorf("cron failed with exit code %d: %w", ret.ExitCode, ret.ActorErr)
+ }
+
cronGas += ret.GasUsed
if em != nil {
diff --git a/chain/consensus/filcns/filecoin.go b/chain/consensus/filcns/filecoin.go
index 509eb8a5e..b5ec13a60 100644
--- a/chain/consensus/filcns/filecoin.go
+++ b/chain/consensus/filcns/filecoin.go
@@ -80,6 +80,11 @@ var RewardFunc = func(ctx context.Context, vmi vm.Interface, em stmgr.ExecMonito
if actErr != nil {
return xerrors.Errorf("failed to apply reward message: %w", actErr)
}
+
+ if !ret.ExitCode.IsSuccess() {
+ return xerrors.Errorf("reward actor failed with exit code %d: %w", ret.ExitCode, ret.ActorErr)
+ }
+
if em != nil {
if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil {
return xerrors.Errorf("callback failed on reward message: %w", err)
@@ -196,7 +201,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
}
- vrfBase, err := rand.DrawRandomness(rBeacon.Data, crypto.DomainSeparationTag_ElectionProofProduction, h.Height, buf.Bytes())
+ vrfBase, err := rand.DrawRandomnessFromBase(rBeacon.Data, crypto.DomainSeparationTag_ElectionProofProduction, h.Height, buf.Bytes())
if err != nil {
return xerrors.Errorf("could not draw randomness: %w", err)
}
@@ -262,7 +267,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
beaconBase = h.BeaconEntries[len(h.BeaconEntries)-1]
}
- vrfBase, err := rand.DrawRandomness(beaconBase.Data, crypto.DomainSeparationTag_TicketProduction, h.Height-build.TicketRandomnessLookback, buf.Bytes())
+ vrfBase, err := rand.DrawRandomnessFromBase(beaconBase.Data, crypto.DomainSeparationTag_TicketProduction, h.Height-build.TicketRandomnessLookback, buf.Bytes())
if err != nil {
return xerrors.Errorf("failed to compute vrf base for ticket: %w", err)
}
@@ -340,7 +345,7 @@ func (filec *FilecoinEC) VerifyWinningPoStProof(ctx context.Context, nv network.
rbase = h.BeaconEntries[len(h.BeaconEntries)-1]
}
- rand, err := rand.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes())
+ rand, err := rand.DrawRandomnessFromBase(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes())
if err != nil {
return xerrors.Errorf("failed to get randomness for verifying winning post proof: %w", err)
}
diff --git a/chain/consensus/filcns/upgrades.go b/chain/consensus/filcns/upgrades.go
index 075937a3c..16abec6a8 100644
--- a/chain/consensus/filcns/upgrades.go
+++ b/chain/consensus/filcns/upgrades.go
@@ -20,6 +20,7 @@ import (
"github.com/filecoin-project/go-state-types/big"
nv18 "github.com/filecoin-project/go-state-types/builtin/v10/migration"
nv19 "github.com/filecoin-project/go-state-types/builtin/v11/migration"
+ nv21 "github.com/filecoin-project/go-state-types/builtin/v12/migration"
nv17 "github.com/filecoin-project/go-state-types/builtin/v9/migration"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/go-state-types/migration"
@@ -261,6 +262,17 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule {
Height: build.UpgradeThunderHeight,
Network: network.Version20,
Migration: nil,
+ }, {
+ Height: build.UpgradeWatermelonHeight,
+ Network: network.Version21,
+ Migration: UpgradeActorsV12,
+ PreMigrations: []stmgr.PreMigration{{
+ PreMigration: PreUpgradeActorsV12,
+ StartWithin: 120,
+ DontStartWithin: 15,
+ StopWithin: 10,
+ }},
+ Expensive: true,
},
}
@@ -1814,6 +1826,108 @@ func upgradeActorsV11Common(
return newRoot, nil
}
+func PreUpgradeActorsV12(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
+ // Use half the CPUs for pre-migration, but leave at least 3.
+ workerCount := MigrationMaxWorkerCount
+ if workerCount <= 4 {
+ workerCount = 1
+ } else {
+ workerCount /= 2
+ }
+
+ lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch)
+ if err != nil {
+ return xerrors.Errorf("error getting lookback ts for premigration: %w", err)
+ }
+
+ config := migration.Config{
+ MaxWorkers: uint(workerCount),
+ ProgressLogPeriod: time.Minute * 5,
+ }
+
+ _, err = upgradeActorsV12Common(ctx, sm, cache, lbRoot, epoch, lbts, config)
+ return err
+}
+
+func UpgradeActorsV12(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor,
+ root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
+	// Use all the CPUs except 3.
+ workerCount := MigrationMaxWorkerCount - 3
+ if workerCount <= 0 {
+ workerCount = 1
+ }
+ config := migration.Config{
+ MaxWorkers: uint(workerCount),
+ JobQueueSize: 1000,
+ ResultQueueSize: 100,
+ ProgressLogPeriod: 10 * time.Second,
+ }
+ newRoot, err := upgradeActorsV12Common(ctx, sm, cache, root, epoch, ts, config)
+ if err != nil {
+		return cid.Undef, xerrors.Errorf("migrating actors v12 state: %w", err)
+ }
+ return newRoot, nil
+}
+
+func upgradeActorsV12Common(
+ ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
+ root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
+ config migration.Config,
+) (cid.Cid, error) {
+ writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB/4)
+ adtStore := store.ActorStore(ctx, writeStore)
+ // ensure that the manifest is loaded in the blockstore
+ if err := bundle.LoadBundles(ctx, writeStore, actorstypes.Version12); err != nil {
+ return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err)
+ }
+
+ // Load the state root.
+ var stateRoot types.StateRoot
+ if err := adtStore.Get(ctx, root, &stateRoot); err != nil {
+ return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
+ }
+
+ if stateRoot.Version != types.StateTreeVersion5 {
+ return cid.Undef, xerrors.Errorf(
+ "expected state root version 5 for actors v12 upgrade, got %d",
+ stateRoot.Version,
+ )
+ }
+
+ manifest, ok := actors.GetManifest(actorstypes.Version12)
+ if !ok {
+ return cid.Undef, xerrors.Errorf("no manifest CID for v12 upgrade")
+ }
+
+ // Perform the migration
+ newHamtRoot, err := nv21.MigrateStateTree(ctx, adtStore, manifest, stateRoot.Actors, epoch, config,
+ migrationLogger{}, cache)
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("upgrading to actors v12: %w", err)
+ }
+
+ // Persist the result.
+ newRoot, err := adtStore.Put(ctx, &types.StateRoot{
+ Version: types.StateTreeVersion5,
+ Actors: newHamtRoot,
+ Info: stateRoot.Info,
+ })
+ if err != nil {
+ return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
+ }
+
+ // Persists the new tree and shuts down the flush worker
+ if err := writeStore.Flush(ctx); err != nil {
+ return cid.Undef, xerrors.Errorf("writeStore flush failed: %w", err)
+ }
+
+ if err := writeStore.Shutdown(ctx); err != nil {
+ return cid.Undef, xerrors.Errorf("writeStore shutdown failed: %w", err)
+ }
+
+ return newRoot, nil
+}
+
// Example upgrade function if upgrade requires only code changes
//func UpgradeActorsV9(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, _ stmgr.ExecMonitor, root cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) {
// buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
diff --git a/chain/events/filter/index.go b/chain/events/filter/index.go
index 27aec481f..bacba60d7 100644
--- a/chain/events/filter/index.go
+++ b/chain/events/filter/index.go
@@ -128,6 +128,16 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
// rollback the transaction (a no-op if the transaction was already committed)
defer tx.Rollback() //nolint:errcheck
+ // create some temporary indices to help speed up the migration
+ _, err = tx.Exec("CREATE INDEX IF NOT EXISTS tmp_height_tipset_key_cid ON event (height,tipset_key_cid)")
+ if err != nil {
+ return xerrors.Errorf("create index tmp_height_tipset_key_cid: %w", err)
+ }
+ _, err = tx.Exec("CREATE INDEX IF NOT EXISTS tmp_tipset_key_cid ON event (tipset_key_cid)")
+ if err != nil {
+ return xerrors.Errorf("create index tmp_tipset_key_cid: %w", err)
+ }
+
stmtDeleteOffChainEvent, err := tx.Prepare("DELETE FROM event WHERE tipset_key_cid!=? and height=?")
if err != nil {
return xerrors.Errorf("prepare stmtDeleteOffChainEvent: %w", err)
@@ -158,12 +168,16 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
currTs := chainStore.GetHeaviestTipSet()
for int64(currTs.Height()) >= minHeight.Int64 {
+ if currTs.Height()%1000 == 0 {
+ log.Infof("Migrating height %d (remaining %d)", currTs.Height(), int64(currTs.Height())-minHeight.Int64)
+ }
+
tsKey := currTs.Parents()
currTs, err = chainStore.GetTipSetFromKey(ctx, tsKey)
if err != nil {
return xerrors.Errorf("get tipset from key: %w", err)
}
- log.Debugf("Migrating height %d\n", currTs.Height())
+ log.Debugf("Migrating height %d", currTs.Height())
tsKeyCid, err := currTs.Key().Cid()
if err != nil {
@@ -190,7 +204,7 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
if !eventId.Valid {
continue
}
- log.Debugf("Deleting all events with id < %d at height %d\n", eventId.Int64, currTs.Height())
+ log.Debugf("Deleting all events with id < %d at height %d", eventId.Int64, currTs.Height())
res, err := stmtDeleteEvent.Exec(tsKeyCid.Bytes(), eventId.Int64)
if err != nil {
@@ -201,7 +215,7 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
if err != nil {
return xerrors.Errorf("rows affected: %w", err)
}
- log.Debugf("deleted %d events from tipset %s\n", nrRowsAffected, tsKeyCid.String())
+ log.Debugf("deleted %d events from tipset %s", nrRowsAffected, tsKeyCid.String())
}
// delete all entries that have an event_id that doesn't exist (since we don't have a foreign
@@ -217,11 +231,34 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
}
log.Infof("cleaned up %d entries that had deleted events\n", nrRowsAffected)
+ // drop the temporary indices after the migration
+ _, err = tx.Exec("DROP INDEX IF EXISTS tmp_tipset_key_cid")
+ if err != nil {
+		return xerrors.Errorf("drop index tmp_tipset_key_cid: %w", err)
+ }
+ _, err = tx.Exec("DROP INDEX IF EXISTS tmp_height_tipset_key_cid")
+ if err != nil {
+ return xerrors.Errorf("drop index tmp_height_tipset_key_cid: %w", err)
+ }
+
err = tx.Commit()
if err != nil {
return xerrors.Errorf("commit transaction: %w", err)
}
+ // during the migration, we have likely increased the WAL size a lot, so lets do some
+ // simple DB administration to free up space (VACUUM followed by truncating the WAL file)
+ // as this would be a good time to do it when no other writes are happening
+ log.Infof("Performing DB vacuum and wal checkpointing to free up space after the migration")
+ _, err = ei.db.Exec("VACUUM")
+ if err != nil {
+ log.Warnf("error vacuuming database: %s", err)
+ }
+ _, err = ei.db.Exec("PRAGMA wal_checkpoint(TRUNCATE)")
+ if err != nil {
+ log.Warnf("error checkpointing wal: %s", err)
+ }
+
log.Infof("Successfully migrated events to version 2 in %s", time.Since(now))
return nil
diff --git a/chain/events/message_cache.go b/chain/events/message_cache.go
index d47d3a168..96f6bcbd7 100644
--- a/chain/events/message_cache.go
+++ b/chain/events/message_cache.go
@@ -4,7 +4,7 @@ import (
"context"
"sync"
- lru "github.com/hashicorp/golang-lru/v2"
+ "github.com/hashicorp/golang-lru/arc/v2"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/api"
@@ -14,11 +14,11 @@ type messageCache struct {
api EventAPI
blockMsgLk sync.Mutex
- blockMsgCache *lru.ARCCache[cid.Cid, *api.BlockMessages]
+ blockMsgCache *arc.ARCCache[cid.Cid, *api.BlockMessages]
}
func newMessageCache(a EventAPI) *messageCache {
- blsMsgCache, _ := lru.NewARC[cid.Cid, *api.BlockMessages](500)
+ blsMsgCache, _ := arc.NewARC[cid.Cid, *api.BlockMessages](500)
return &messageCache{
api: a,
diff --git a/chain/events/state/ctxstore.go b/chain/events/state/ctxstore.go
deleted file mode 100644
index 12b45e425..000000000
--- a/chain/events/state/ctxstore.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package state
-
-import (
- "context"
-
- "github.com/ipfs/go-cid"
- cbor "github.com/ipfs/go-ipld-cbor"
-)
-
-type contextStore struct {
- ctx context.Context
- cst *cbor.BasicIpldStore
-}
-
-func (cs *contextStore) Context() context.Context {
- return cs.ctx
-}
-
-func (cs *contextStore) Get(ctx context.Context, c cid.Cid, out interface{}) error {
- return cs.cst.Get(ctx, c, out)
-}
-
-func (cs *contextStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) {
- return cs.cst.Put(ctx, v)
-}
diff --git a/chain/gen/gen.go b/chain/gen/gen.go
index 2e5f5e7f7..9f8d0834d 100644
--- a/chain/gen/gen.go
+++ b/chain/gen/gen.go
@@ -362,7 +362,7 @@ func (cg *ChainGen) nextBlockProof(ctx context.Context, pts *types.TipSet, m add
rbase = entries[len(entries)-1]
}
- eproof, err := IsRoundWinner(ctx, pts, round, m, rbase, mbi, mc)
+ eproof, err := IsRoundWinner(ctx, round, m, rbase, mbi, mc)
if err != nil {
return nil, nil, nil, xerrors.Errorf("checking round winner failed: %w", err)
}
@@ -376,7 +376,7 @@ func (cg *ChainGen) nextBlockProof(ctx context.Context, pts *types.TipSet, m add
buf.Write(pts.MinTicket().VRFProof)
}
- ticketRand, err := rand.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_TicketProduction, round-build.TicketRandomnessLookback, buf.Bytes())
+ ticketRand, err := rand.DrawRandomnessFromBase(rbase.Data, crypto.DomainSeparationTag_TicketProduction, round-build.TicketRandomnessLookback, buf.Bytes())
if err != nil {
return nil, nil, nil, err
}
@@ -449,18 +449,19 @@ func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Ad
}
func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch) (*store.FullTipSet, error) {
+ ctx := context.TODO()
var blks []*types.FullBlock
for round := base.Height() + nulls + 1; len(blks) == 0; round++ {
for mi, m := range miners {
- bvals, et, ticket, err := cg.nextBlockProof(context.TODO(), base, m, round)
+ bvals, et, ticket, err := cg.nextBlockProof(ctx, base, m, round)
if err != nil {
return nil, xerrors.Errorf("next block proof: %w", err)
}
if et != nil {
// TODO: maybe think about passing in more real parameters to this?
- wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil, round, network.Version0)
+ wpost, err := cg.eppProvs[m].ComputeProof(ctx, nil, nil, round, network.Version0)
if err != nil {
return nil, err
}
@@ -476,8 +477,18 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet,
}
fts := store.NewFullTipSet(blks)
- if err := cg.cs.PutTipSet(context.TODO(), fts.TipSet()); err != nil {
- return nil, err
+ if err := cg.cs.PersistTipsets(ctx, []*types.TipSet{fts.TipSet()}); err != nil {
+ return nil, xerrors.Errorf("failed to persist tipset: %w", err)
+ }
+
+ for _, blk := range blks {
+ if err := cg.cs.AddToTipSetTracker(ctx, blk.Header); err != nil {
+ return nil, xerrors.Errorf("failed to add to tipset tracker: %w", err)
+ }
+ }
+
+ if err := cg.cs.RefreshHeaviestTipSet(ctx, fts.TipSet().Height()); err != nil {
+ return nil, xerrors.Errorf("failed to put tipset: %w", err)
}
cg.CurTipset = fts
@@ -628,7 +639,7 @@ func (wpp *wppProvider) ComputeProof(context.Context, []proof7.ExtendedSectorInf
return ValidWpostForTesting, nil
}
-func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch,
+func IsRoundWinner(ctx context.Context, round abi.ChainEpoch,
miner address.Address, brand types.BeaconEntry, mbi *api.MiningBaseInfo, a MiningCheckAPI) (*types.ElectionProof, error) {
buf := new(bytes.Buffer)
@@ -636,7 +647,7 @@ func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch,
return nil, xerrors.Errorf("failed to cbor marshal address: %w", err)
}
- electionRand, err := rand.DrawRandomness(brand.Data, crypto.DomainSeparationTag_ElectionProofProduction, round, buf.Bytes())
+ electionRand, err := rand.DrawRandomnessFromBase(brand.Data, crypto.DomainSeparationTag_ElectionProofProduction, round, buf.Bytes())
if err != nil {
return nil, xerrors.Errorf("failed to draw randomness: %w", err)
}
diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go
index c083f4fda..2d9942464 100644
--- a/chain/gen/genesis/miners.go
+++ b/chain/gen/genesis/miners.go
@@ -43,6 +43,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/consensus"
+ lrand "github.com/filecoin-project/lotus/chain/rand"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@@ -590,19 +591,21 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
return c, nil
}
+var _ lrand.Rand = new(fakeRand)
+
// TODO: copied from actors test harness, deduplicate or remove from here
type fakeRand struct{}
-func (fr *fakeRand) GetChainRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (fr *fakeRand) GetChainRandomness(ctx context.Context, randEpoch abi.ChainEpoch) ([32]byte, error) {
out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint
- return out, nil
+ return *(*[32]byte)(out), nil
}
-func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, randEpoch abi.ChainEpoch) ([32]byte, error) {
out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint
- return out, nil
+ return *(*[32]byte)(out), nil
}
func currentTotalPower(ctx context.Context, vm vm.Interface, maddr address.Address) (*power0.CurrentTotalPowerReturn, error) {
diff --git a/chain/gen/slashfilter/slashfilter.go b/chain/gen/slashfilter/slashfilter.go
index 0e6b00cfb..71b5dad9a 100644
--- a/chain/gen/slashfilter/slashfilter.go
+++ b/chain/gen/slashfilter/slashfilter.go
@@ -26,20 +26,30 @@ func New(dstore ds.Batching) *SlashFilter {
}
}
-func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, parentEpoch abi.ChainEpoch) (cid.Cid, error) {
+func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, parentEpoch abi.ChainEpoch) (cid.Cid, bool, error) {
epochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, bh.Height))
{
// double-fork mining (2 blocks at one epoch)
- if witness, err := checkFault(ctx, f.byEpoch, epochKey, bh, "double-fork mining faults"); err != nil {
- return witness, xerrors.Errorf("check double-fork mining faults: %w", err)
+ doubleForkWitness, doubleForkFault, err := checkFault(ctx, f.byEpoch, epochKey, bh, "double-fork mining faults")
+ if err != nil {
+ return cid.Undef, false, xerrors.Errorf("check double-fork mining faults: %w", err)
+ }
+
+ if doubleForkFault {
+ return doubleForkWitness, doubleForkFault, nil
}
}
parentsKey := ds.NewKey(fmt.Sprintf("/%s/%x", bh.Miner, types.NewTipSetKey(bh.Parents...).Bytes()))
{
// time-offset mining faults (2 blocks with the same parents)
- if witness, err := checkFault(ctx, f.byParents, parentsKey, bh, "time-offset mining faults"); err != nil {
- return witness, xerrors.Errorf("check time-offset mining faults: %w", err)
+ timeOffsetWitness, timeOffsetFault, err := checkFault(ctx, f.byParents, parentsKey, bh, "time-offset mining faults")
+ if err != nil {
+ return cid.Undef, false, xerrors.Errorf("check time-offset mining faults: %w", err)
+ }
+
+ if timeOffsetFault {
+ return timeOffsetWitness, timeOffsetFault, nil
}
}
@@ -50,19 +60,19 @@ func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, par
parentEpochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, parentEpoch))
have, err := f.byEpoch.Has(ctx, parentEpochKey)
if err != nil {
- return cid.Undef, err
+ return cid.Undef, false, xerrors.Errorf("failed to read from db: %w", err)
}
if have {
// If we had, make sure it's in our parent tipset
cidb, err := f.byEpoch.Get(ctx, parentEpochKey)
if err != nil {
- return cid.Undef, xerrors.Errorf("getting other block cid: %w", err)
+ return cid.Undef, false, xerrors.Errorf("getting other block cid: %w", err)
}
_, parent, err := cid.CidFromBytes(cidb)
if err != nil {
- return cid.Undef, err
+ return cid.Undef, false, xerrors.Errorf("failed to read cid from bytes: %w", err)
}
var found bool
@@ -73,45 +83,45 @@ func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, par
}
if !found {
- return parent, xerrors.Errorf("produced block would trigger 'parent-grinding fault' consensus fault; miner: %s; bh: %s, expected parent: %s", bh.Miner, bh.Cid(), parent)
+ return parent, true, nil
}
}
}
if err := f.byParents.Put(ctx, parentsKey, bh.Cid().Bytes()); err != nil {
- return cid.Undef, xerrors.Errorf("putting byEpoch entry: %w", err)
+ return cid.Undef, false, xerrors.Errorf("putting byEpoch entry: %w", err)
}
if err := f.byEpoch.Put(ctx, epochKey, bh.Cid().Bytes()); err != nil {
- return cid.Undef, xerrors.Errorf("putting byEpoch entry: %w", err)
+ return cid.Undef, false, xerrors.Errorf("putting byEpoch entry: %w", err)
}
- return cid.Undef, nil
+ return cid.Undef, false, nil
}
-func checkFault(ctx context.Context, t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) (cid.Cid, error) {
+func checkFault(ctx context.Context, t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) (cid.Cid, bool, error) {
fault, err := t.Has(ctx, key)
if err != nil {
- return cid.Undef, xerrors.Errorf("failed to read from datastore: %w", err)
+ return cid.Undef, false, xerrors.Errorf("failed to read from datastore: %w", err)
}
if fault {
cidb, err := t.Get(ctx, key)
if err != nil {
- return cid.Undef, xerrors.Errorf("getting other block cid: %w", err)
+ return cid.Undef, false, xerrors.Errorf("getting other block cid: %w", err)
}
_, other, err := cid.CidFromBytes(cidb)
if err != nil {
- return cid.Undef, err
+ return cid.Undef, false, xerrors.Errorf("failed to read cid of other block: %w", err)
}
if other == bh.Cid() {
- return cid.Undef, nil
+ return cid.Undef, false, nil
}
- return other, xerrors.Errorf("produced block would trigger '%s' consensus fault; miner: %s; bh: %s, other: %s", faultType, bh.Miner, bh.Cid(), other)
+ return other, true, nil
}
- return cid.Undef, nil
+ return cid.Undef, false, nil
}
diff --git a/chain/gen/slashfilter/slashsvc/slashservice.go b/chain/gen/slashfilter/slashsvc/slashservice.go
new file mode 100644
index 000000000..7a6622880
--- /dev/null
+++ b/chain/gen/slashfilter/slashsvc/slashservice.go
@@ -0,0 +1,179 @@
+package slashsvc
+
+import (
+ "context"
+ "time"
+
+ "github.com/ipfs/go-cid"
+ levelds "github.com/ipfs/go-ds-leveldb"
+ logging "github.com/ipfs/go-log/v2"
+ ldbopts "github.com/syndtr/goleveldb/leveldb/opt"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ cborutil "github.com/filecoin-project/go-cbor-util"
+ "github.com/filecoin-project/go-state-types/builtin"
+ "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+
+ lapi "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/gen/slashfilter"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+var log = logging.Logger("slashsvc")
+
+type ConsensusSlasherApi interface {
+ ChainHead(context.Context) (*types.TipSet, error)
+ ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error)
+ MpoolPushMessage(ctx context.Context, msg *types.Message, spec *lapi.MessageSendSpec) (*types.SignedMessage, error)
+ SyncIncomingBlocks(context.Context) (<-chan *types.BlockHeader, error)
+ WalletDefaultAddress(context.Context) (address.Address, error)
+}
+
+func SlashConsensus(ctx context.Context, a ConsensusSlasherApi, p string, from string) error {
+ var fromAddr address.Address
+
+ ds, err := levelds.NewDatastore(p, &levelds.Options{
+ Compression: ldbopts.NoCompression,
+ NoSync: false,
+ Strict: ldbopts.StrictAll,
+ ReadOnly: false,
+ })
+ if err != nil {
+ return xerrors.Errorf("open leveldb: %w", err)
+ }
+ sf := slashfilter.New(ds)
+ if from == "" {
+ defaddr, err := a.WalletDefaultAddress(ctx)
+ if err != nil {
+ return err
+ }
+ fromAddr = defaddr
+ } else {
+ addr, err := address.NewFromString(from)
+ if err != nil {
+ return err
+ }
+
+ fromAddr = addr
+ }
+
+ blocks, err := a.SyncIncomingBlocks(ctx)
+ if err != nil {
+ return xerrors.Errorf("sync incoming blocks failed: %w", err)
+ }
+
+ log.Infow("consensus fault reporter", "from", fromAddr)
+ go func() {
+ for block := range blocks {
+ otherBlock, extraBlock, fault, err := slashFilterMinedBlock(ctx, sf, a, block)
+ if err != nil {
+ log.Errorf("slash detector errored: %s", err)
+ continue
+ }
+ if fault {
+ log.Errorf(" SLASH FILTER DETECTED FAULT DUE TO BLOCKS %s and %s", otherBlock.Cid(), block.Cid())
+ bh1, err := cborutil.Dump(otherBlock)
+ if err != nil {
+ log.Errorf("could not dump otherblock:%s, err:%s", otherBlock.Cid(), err)
+ continue
+ }
+
+ bh2, err := cborutil.Dump(block)
+ if err != nil {
+ log.Errorf("could not dump block:%s, err:%s", block.Cid(), err)
+ continue
+ }
+
+ params := miner.ReportConsensusFaultParams{
+ BlockHeader1: bh1,
+ BlockHeader2: bh2,
+ }
+ if extraBlock != nil {
+ be, err := cborutil.Dump(extraBlock)
+ if err != nil {
+ log.Errorf("could not dump block:%s, err:%s", block.Cid(), err)
+ continue
+ }
+ params.BlockHeaderExtra = be
+ }
+
+ enc, err := actors.SerializeParams(¶ms)
+ if err != nil {
+ log.Errorf("could not serialize declare faults parameters: %s", err)
+ continue
+ }
+ for {
+ head, err := a.ChainHead(ctx)
+ if err != nil || head.Height() > block.Height {
+ break
+ }
+ time.Sleep(time.Second * 10)
+ }
+ message, err := a.MpoolPushMessage(ctx, &types.Message{
+ To: block.Miner,
+ From: fromAddr,
+ Value: types.NewInt(0),
+ Method: builtin.MethodsMiner.ReportConsensusFault,
+ Params: enc,
+ }, nil)
+ if err != nil {
+ log.Errorf("ReportConsensusFault to messagepool error:%s", err)
+ continue
+ }
+ log.Infof("ReportConsensusFault message CID:%s", message.Cid())
+
+ }
+ }
+ }()
+
+ return nil
+}
+
+func slashFilterMinedBlock(ctx context.Context, sf *slashfilter.SlashFilter, a ConsensusSlasherApi, blockB *types.BlockHeader) (*types.BlockHeader, *types.BlockHeader, bool, error) {
+ blockC, err := a.ChainGetBlock(ctx, blockB.Parents[0])
+ if err != nil {
+ return nil, nil, false, xerrors.Errorf("chain get block error:%s", err)
+ }
+
+ blockACid, fault, err := sf.MinedBlock(ctx, blockB, blockC.Height)
+ if err != nil {
+ return nil, nil, false, xerrors.Errorf("slash filter check block error:%s", err)
+ }
+
+ if !fault {
+ return nil, nil, false, nil
+ }
+
+ blockA, err := a.ChainGetBlock(ctx, blockACid)
+ if err != nil {
+ return nil, nil, false, xerrors.Errorf("failed to get blockA: %w", err)
+ }
+
+ // (a) double-fork mining (2 blocks at one epoch)
+ if blockA.Height == blockB.Height {
+ return blockA, nil, true, nil
+ }
+
+ // (b) time-offset mining faults (2 blocks with the same parents)
+ if types.CidArrsEqual(blockB.Parents, blockA.Parents) {
+ return blockA, nil, true, nil
+ }
+
+ // (c) parent-grinding fault
+ // Here extra is the "witness", a third block that shows the connection between A and B as
+ // A's sibling and B's parent.
+ // Specifically, since A is of lower height, it must be that B was mined omitting A from its tipset
+ //
+ // B
+ // |
+ // [A, C]
+ if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height &&
+ types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) {
+ return blockA, blockC, true, nil
+ }
+
+ log.Error("unexpectedly reached end of slashFilterMinedBlock despite fault being reported!")
+ return nil, nil, false, nil
+}
diff --git a/chain/market/store.go b/chain/market/store.go
index ece1248f6..10ab2abe1 100644
--- a/chain/market/store.go
+++ b/chain/market/store.go
@@ -39,23 +39,6 @@ func (ps *Store) save(ctx context.Context, state *FundedAddressState) error {
return ps.ds.Put(ctx, k, b)
}
-// get the state for the given address
-func (ps *Store) get(ctx context.Context, addr address.Address) (*FundedAddressState, error) {
- k := dskeyForAddr(addr)
-
- data, err := ps.ds.Get(ctx, k)
- if err != nil {
- return nil, err
- }
-
- var state FundedAddressState
- err = cborrpc.ReadCborRPC(bytes.NewReader(data), &state)
- if err != nil {
- return nil, err
- }
- return &state, nil
-}
-
// forEach calls iter with each address in the datastore
func (ps *Store) forEach(ctx context.Context, iter func(*FundedAddressState)) error {
res, err := ps.ds.Query(ctx, dsq.Query{Prefix: dsKeyAddr})
diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go
index 50f64f903..6dc3f2239 100644
--- a/chain/messagepool/messagepool.go
+++ b/chain/messagepool/messagepool.go
@@ -63,6 +63,9 @@ var MaxNonceGap = uint64(4)
const MaxMessageSize = 64 << 10 // 64KiB
+// NOTE: When adding a new error type, please make sure to add the new error type in
+// func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message)
+// in /chain/sub/incoming.go
var (
ErrMessageTooBig = errors.New("message too big")
diff --git a/chain/rand/rand.go b/chain/rand/rand.go
index c35280ab5..40f9f593a 100644
--- a/chain/rand/rand.go
+++ b/chain/rand/rand.go
@@ -17,18 +17,20 @@ import (
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/chain/vm"
)
var log = logging.Logger("rand")
-func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func DrawRandomnessFromBase(rbase []byte, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ return DrawRandomnessFromDigest(blake2b.Sum256(rbase), pers, round, entropy)
+}
+
+func DrawRandomnessFromDigest(digest [32]byte, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
h := blake2b.New256()
if err := binary.Write(h, binary.BigEndian, int64(pers)); err != nil {
return nil, xerrors.Errorf("deriving randomness: %w", err)
}
- VRFDigest := blake2b.Sum256(rbase)
- _, err := h.Write(VRFDigest[:])
+ _, err := h.Write(digest[:])
if err != nil {
return nil, xerrors.Errorf("hashing VRFDigest: %w", err)
}
@@ -70,18 +72,18 @@ func (sr *stateRand) GetBeaconRandomnessTipset(ctx context.Context, round abi.Ch
return randTs, nil
}
-func (sr *stateRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) {
+func (sr *stateRand) getChainRandomness(ctx context.Context, round abi.ChainEpoch, lookback bool) ([32]byte, error) {
_, span := trace.StartSpan(ctx, "store.GetChainRandomness")
defer span.End()
span.AddAttributes(trace.Int64Attribute("round", int64(round)))
ts, err := sr.cs.LoadTipSet(ctx, types.NewTipSetKey(sr.blks...))
if err != nil {
- return nil, err
+ return [32]byte{}, err
}
if round > ts.Height() {
- return nil, xerrors.Errorf("cannot draw randomness from the future")
+ return [32]byte{}, xerrors.Errorf("cannot draw randomness from the future")
}
searchHeight := round
@@ -91,14 +93,10 @@ func (sr *stateRand) getChainRandomness(ctx context.Context, pers crypto.DomainS
randTs, err := sr.cs.GetTipsetByHeight(ctx, searchHeight, ts, lookback)
if err != nil {
- return nil, err
+ return [32]byte{}, err
}
- mtb := randTs.MinTicketBlock()
-
- // if at (or just past -- for null epochs) appropriate epoch
- // or at genesis (works for negative epochs)
- return DrawRandomness(mtb.Ticket.VRFProof, pers, round, entropy)
+ return blake2b.Sum256(randTs.MinTicketBlock().Ticket.VRFProof), nil
}
type NetworkVersionGetter func(context.Context, abi.ChainEpoch) network.Version
@@ -110,7 +108,12 @@ type stateRand struct {
networkVersionGetter NetworkVersionGetter
}
-func NewStateRand(cs *store.ChainStore, blks []cid.Cid, b beacon.Schedule, networkVersionGetter NetworkVersionGetter) vm.Rand {
+type Rand interface {
+ GetChainRandomness(ctx context.Context, round abi.ChainEpoch) ([32]byte, error)
+ GetBeaconRandomness(ctx context.Context, round abi.ChainEpoch) ([32]byte, error)
+}
+
+func NewStateRand(cs *store.ChainStore, blks []cid.Cid, b beacon.Schedule, networkVersionGetter NetworkVersionGetter) Rand {
return &stateRand{
cs: cs,
blks: blks,
@@ -120,76 +123,102 @@ func NewStateRand(cs *store.ChainStore, blks []cid.Cid, b beacon.Schedule, netwo
}
// network v0-12
-func (sr *stateRand) getBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (sr *stateRand) getBeaconRandomnessV1(ctx context.Context, round abi.ChainEpoch) ([32]byte, error) {
randTs, err := sr.GetBeaconRandomnessTipset(ctx, round, true)
if err != nil {
- return nil, err
+ return [32]byte{}, err
}
be, err := sr.cs.GetLatestBeaconEntry(ctx, randTs)
if err != nil {
- return nil, err
+ return [32]byte{}, err
}
- // if at (or just past -- for null epochs) appropriate epoch
- // or at genesis (works for negative epochs)
- return DrawRandomness(be.Data, pers, round, entropy)
+ return blake2b.Sum256(be.Data), nil
}
// network v13
-func (sr *stateRand) getBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (sr *stateRand) getBeaconRandomnessV2(ctx context.Context, round abi.ChainEpoch) ([32]byte, error) {
randTs, err := sr.GetBeaconRandomnessTipset(ctx, round, false)
if err != nil {
- return nil, err
+ return [32]byte{}, err
}
be, err := sr.cs.GetLatestBeaconEntry(ctx, randTs)
if err != nil {
- return nil, err
+ return [32]byte{}, err
}
- // if at (or just past -- for null epochs) appropriate epoch
- // or at genesis (works for negative epochs)
- return DrawRandomness(be.Data, pers, round, entropy)
+ return blake2b.Sum256(be.Data), nil
}
// network v14 and on
-func (sr *stateRand) getBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (sr *stateRand) getBeaconRandomnessV3(ctx context.Context, filecoinEpoch abi.ChainEpoch) ([32]byte, error) {
if filecoinEpoch < 0 {
- return sr.getBeaconRandomnessV2(ctx, pers, filecoinEpoch, entropy)
+ return sr.getBeaconRandomnessV2(ctx, filecoinEpoch)
}
be, err := sr.extractBeaconEntryForEpoch(ctx, filecoinEpoch)
if err != nil {
log.Errorf("failed to get beacon entry as expected: %s", err)
- return nil, err
+ return [32]byte{}, err
}
- return DrawRandomness(be.Data, pers, filecoinEpoch, entropy)
+ return blake2b.Sum256(be.Data), nil
}
-func (sr *stateRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (sr *stateRand) GetChainRandomness(ctx context.Context, filecoinEpoch abi.ChainEpoch) ([32]byte, error) {
nv := sr.networkVersionGetter(ctx, filecoinEpoch)
if nv >= network.Version13 {
- return sr.getChainRandomness(ctx, pers, filecoinEpoch, entropy, false)
+ return sr.getChainRandomness(ctx, filecoinEpoch, false)
}
- return sr.getChainRandomness(ctx, pers, filecoinEpoch, entropy, true)
+ return sr.getChainRandomness(ctx, filecoinEpoch, true)
}
-func (sr *stateRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (sr *stateRand) GetBeaconRandomness(ctx context.Context, filecoinEpoch abi.ChainEpoch) ([32]byte, error) {
nv := sr.networkVersionGetter(ctx, filecoinEpoch)
if nv >= network.Version14 {
- return sr.getBeaconRandomnessV3(ctx, pers, filecoinEpoch, entropy)
+ return sr.getBeaconRandomnessV3(ctx, filecoinEpoch)
} else if nv == network.Version13 {
- return sr.getBeaconRandomnessV2(ctx, pers, filecoinEpoch, entropy)
+ return sr.getBeaconRandomnessV2(ctx, filecoinEpoch)
} else {
- return sr.getBeaconRandomnessV1(ctx, pers, filecoinEpoch, entropy)
+ return sr.getBeaconRandomnessV1(ctx, filecoinEpoch)
}
}
+func (sr *stateRand) DrawChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ digest, err := sr.GetChainRandomness(ctx, filecoinEpoch)
+
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get chain randomness: %w", err)
+ }
+
+ ret, err := DrawRandomnessFromDigest(digest, pers, filecoinEpoch, entropy)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to draw chain randomness: %w", err)
+ }
+
+ return ret, nil
+}
+
+func (sr *stateRand) DrawBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
+ digest, err := sr.GetBeaconRandomness(ctx, filecoinEpoch)
+
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get beacon randomness: %w", err)
+ }
+
+ ret, err := DrawRandomnessFromDigest(digest, pers, filecoinEpoch, entropy)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to draw beacon randomness: %w", err)
+ }
+
+ return ret, nil
+}
+
func (sr *stateRand) extractBeaconEntryForEpoch(ctx context.Context, filecoinEpoch abi.ChainEpoch) (*types.BeaconEntry, error) {
randTs, err := sr.GetBeaconRandomnessTipset(ctx, filecoinEpoch, false)
if err != nil {
diff --git a/chain/rand/rand_test.go b/chain/rand/rand_test.go
index acd928854..e2e722165 100644
--- a/chain/rand/rand_test.go
+++ b/chain/rand/rand_test.go
@@ -69,7 +69,7 @@ func TestNullRandomnessV1(t *testing.T) {
}
//stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01
- rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
+ rand2, err := rand.DrawRandomnessFromBase(resp.Entry.Data, pers, randEpoch, entropy)
if err != nil {
t.Fatal(err)
}
@@ -148,8 +148,8 @@ func TestNullRandomnessV2(t *testing.T) {
}
//stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01, @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_TIPSET_03
- // note that the randEpoch passed to DrawRandomness is still randEpoch (not the latest ts height)
- rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
+ // note that the randEpoch passed to DrawRandomnessFromBase is still randEpoch (not the latest ts height)
+ rand2, err := rand.DrawRandomnessFromBase(resp.Entry.Data, pers, randEpoch, entropy)
if err != nil {
t.Fatal(err)
}
@@ -232,7 +232,7 @@ func TestNullRandomnessV3(t *testing.T) {
}
//stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01
- rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
+ rand2, err := rand.DrawRandomnessFromBase(resp.Entry.Data, pers, randEpoch, entropy)
if err != nil {
t.Fatal(err)
}
diff --git a/chain/state/statetree.go b/chain/state/statetree.go
index 3142a07d8..c71473e8f 100644
--- a/chain/state/statetree.go
+++ b/chain/state/statetree.go
@@ -156,7 +156,7 @@ func VersionForNetwork(ver network.Version) (types.StateTreeVersion, error) {
case network.Version13, network.Version14, network.Version15, network.Version16, network.Version17:
return types.StateTreeVersion4, nil
- case network.Version18, network.Version19, network.Version20:
+ case network.Version18, network.Version19, network.Version20, network.Version21:
return types.StateTreeVersion5, nil
default:
diff --git a/chain/stmgr/actors.go b/chain/stmgr/actors.go
index 4de39c7f1..56744fa74 100644
--- a/chain/stmgr/actors.go
+++ b/chain/stmgr/actors.go
@@ -355,7 +355,7 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule
return nil, xerrors.Errorf("failed to marshal miner address: %w", err)
}
- prand, err := rand.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, round, buf.Bytes())
+ prand, err := rand.DrawRandomnessFromBase(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, round, buf.Bytes())
if err != nil {
return nil, xerrors.Errorf("failed to get randomness for winning post: %w", err)
}
diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go
index 1f9977d96..2f18bde82 100644
--- a/chain/stmgr/forks.go
+++ b/chain/stmgr/forks.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/binary"
+ "errors"
"os"
"sort"
"strings"
@@ -11,6 +12,7 @@ import (
"time"
"github.com/ipfs/go-cid"
+ "github.com/ipfs/go-datastore"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
@@ -177,11 +179,15 @@ func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, heig
u := sm.stateMigrations[height]
if u != nil && u.upgrade != nil {
migCid, ok, err := u.migrationResultCache.Get(ctx, root)
- if err == nil && ok {
- log.Infow("CACHED migration", "height", height, "from", root, "to", migCid)
- return migCid, nil
- } else if err != nil {
+ if err == nil {
+ if ok {
+ log.Infow("CACHED migration", "height", height, "from", root, "to", migCid)
+ return migCid, nil
+ }
+ } else if !errors.Is(err, datastore.ErrNotFound) {
log.Errorw("failed to lookup previous migration result", "err", err)
+ } else {
+ log.Debug("no cached migration found, migrating from scratch")
}
startTime := time.Now()
@@ -226,11 +232,6 @@ func (sm *StateManager) hasExpensiveForkBetween(parent, height abi.ChainEpoch) b
return false
}
-func (sm *StateManager) hasExpensiveFork(height abi.ChainEpoch) bool {
- _, ok := sm.expensiveUpgrades[height]
- return ok
-}
-
func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv16.MemMigrationCache, ts *types.TipSet) {
height := ts.Height()
parent := ts.ParentState()
diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go
index 12b991e57..49913e442 100644
--- a/chain/stmgr/stmgr.go
+++ b/chain/stmgr/stmgr.go
@@ -7,7 +7,7 @@ import (
"strconv"
"sync"
- lru "github.com/hashicorp/golang-lru/v2"
+ "github.com/hashicorp/golang-lru/arc/v2"
"github.com/ipfs/go-cid"
dstore "github.com/ipfs/go-datastore"
cbor "github.com/ipfs/go-ipld-cbor"
@@ -156,7 +156,7 @@ type StateManager struct {
// We keep a small cache for calls to ExecutionTrace which helps improve
// performance for node operators like exchanges and block explorers
- execTraceCache *lru.ARCCache[types.TipSetKey, tipSetCacheEntry]
+ execTraceCache *arc.ARCCache[types.TipSetKey, tipSetCacheEntry]
// We need a lock while making the copy as to prevent other callers
// overwrite the cache while making the copy
execTraceCacheLock sync.Mutex
@@ -213,10 +213,10 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
}
log.Debugf("execTraceCache size: %d", execTraceCacheSize)
- var execTraceCache *lru.ARCCache[types.TipSetKey, tipSetCacheEntry]
+ var execTraceCache *arc.ARCCache[types.TipSetKey, tipSetCacheEntry]
var err error
if execTraceCacheSize > 0 {
- execTraceCache, err = lru.NewARC[types.TipSetKey, tipSetCacheEntry](execTraceCacheSize)
+ execTraceCache, err = arc.NewARC[types.TipSetKey, tipSetCacheEntry](execTraceCacheSize)
if err != nil {
return nil, err
}
@@ -509,7 +509,17 @@ func (sm *StateManager) GetRandomnessFromBeacon(ctx context.Context, personaliza
r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon, sm.GetNetworkVersion)
- return r.GetBeaconRandomness(ctx, personalization, randEpoch, entropy)
+ digest, err := r.GetBeaconRandomness(ctx, randEpoch)
+ if err != nil {
+ return nil, xerrors.Errorf("getting beacon randomness: %w", err)
+ }
+
+ ret, err := rand.DrawRandomnessFromDigest(digest, personalization, randEpoch, entropy)
+ if err != nil {
+ return nil, xerrors.Errorf("drawing beacon randomness: %w", err)
+ }
+
+ return ret, nil
}
@@ -521,5 +531,38 @@ func (sm *StateManager) GetRandomnessFromTickets(ctx context.Context, personaliz
r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon, sm.GetNetworkVersion)
- return r.GetChainRandomness(ctx, personalization, randEpoch, entropy)
+ digest, err := r.GetChainRandomness(ctx, randEpoch)
+ if err != nil {
+ return nil, xerrors.Errorf("getting chain randomness: %w", err)
+ }
+
+ ret, err := rand.DrawRandomnessFromDigest(digest, personalization, randEpoch, entropy)
+ if err != nil {
+ return nil, xerrors.Errorf("drawing chain randomness: %w", err)
+ }
+
+ return ret, nil
+}
+
+func (sm *StateManager) GetRandomnessDigestFromBeacon(ctx context.Context, randEpoch abi.ChainEpoch, tsk types.TipSetKey) ([32]byte, error) {
+ pts, err := sm.ChainStore().GetTipSetFromKey(ctx, tsk)
+ if err != nil {
+ return [32]byte{}, xerrors.Errorf("loading tipset %s: %w", tsk, err)
+ }
+
+ r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon, sm.GetNetworkVersion)
+
+ return r.GetBeaconRandomness(ctx, randEpoch)
+
+}
+
+func (sm *StateManager) GetRandomnessDigestFromTickets(ctx context.Context, randEpoch abi.ChainEpoch, tsk types.TipSetKey) ([32]byte, error) {
+ pts, err := sm.ChainStore().LoadTipSet(ctx, tsk)
+ if err != nil {
+ return [32]byte{}, xerrors.Errorf("loading tipset key: %w", err)
+ }
+
+ r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon, sm.GetNetworkVersion)
+
+ return r.GetChainRandomness(ctx, randEpoch)
}
diff --git a/chain/stmgr/supply.go b/chain/stmgr/supply.go
index b48f9af43..9486cb936 100644
--- a/chain/stmgr/supply.go
+++ b/chain/stmgr/supply.go
@@ -388,6 +388,14 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha
circ := big.Zero()
unCirc := big.Zero()
err := st.ForEach(func(a address.Address, actor *types.Actor) error {
+ // this can be a lengthy operation, we need to cancel early when
+ // the context is cancelled to avoid resource exhaustion
+ select {
+ case <-ctx.Done():
+ // this will cause ForEach to return
+ return ctx.Err()
+ default:
+ }
switch {
case actor.Balance.IsZero():
// Do nothing for zero-balance actors
diff --git a/chain/store/checkpoint_test.go b/chain/store/checkpoint_test.go
index bc2cb5e73..c5dff94a8 100644
--- a/chain/store/checkpoint_test.go
+++ b/chain/store/checkpoint_test.go
@@ -70,7 +70,7 @@ func TestChainCheckpoint(t *testing.T) {
}
// See if the chain will take the fork, it shouldn't.
- err = cs.MaybeTakeHeavierTipSet(context.Background(), last)
+ err = cs.RefreshHeaviestTipSet(context.Background(), last.Height())
require.NoError(t, err)
head = cs.GetHeaviestTipSet()
require.True(t, head.Equals(checkpoint))
@@ -80,7 +80,7 @@ func TestChainCheckpoint(t *testing.T) {
require.NoError(t, err)
// Now switch to the other fork.
- err = cs.MaybeTakeHeavierTipSet(context.Background(), last)
+ err = cs.RefreshHeaviestTipSet(context.Background(), last.Height())
require.NoError(t, err)
head = cs.GetHeaviestTipSet()
require.True(t, head.Equals(last))
diff --git a/chain/store/index_test.go b/chain/store/index_test.go
index 63a1abad0..a3a4ad6ce 100644
--- a/chain/store/index_test.go
+++ b/chain/store/index_test.go
@@ -16,6 +16,7 @@ import (
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
)
@@ -47,28 +48,29 @@ func TestIndexSeeks(t *testing.T) {
}
cur := mock.TipSet(gen)
- if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil {
- t.Fatal(err)
- }
+
assert.NoError(t, cs.SetGenesis(ctx, gen))
// Put 113 blocks from genesis
for i := 0; i < 113; i++ {
- nextts := mock.TipSet(mock.MkBlock(cur, 1, 1))
-
- if err := cs.PutTipSet(ctx, nextts); err != nil {
- t.Fatal(err)
- }
+ nextBlk := mock.MkBlock(cur, 1, 1)
+ nextts := mock.TipSet(nextBlk)
+ assert.NoError(t, cs.PersistTipsets(ctx, []*types.TipSet{nextts}))
+ assert.NoError(t, cs.AddToTipSetTracker(ctx, nextBlk))
cur = nextts
}
+ assert.NoError(t, cs.RefreshHeaviestTipSet(ctx, cur.Height()))
+
// Put 50 null epochs + 1 block
skip := mock.MkBlock(cur, 1, 1)
skip.Height += 50
-
skipts := mock.TipSet(skip)
- if err := cs.PutTipSet(ctx, skipts); err != nil {
+ assert.NoError(t, cs.PersistTipsets(ctx, []*types.TipSet{skipts}))
+ assert.NoError(t, cs.AddToTipSetTracker(ctx, skip))
+
+ if err := cs.RefreshHeaviestTipSet(ctx, skip.Height); err != nil {
t.Fatal(err)
}
diff --git a/chain/store/snapshot.go b/chain/store/snapshot.go
index 92bc238a6..5e218fa36 100644
--- a/chain/store/snapshot.go
+++ b/chain/store/snapshot.go
@@ -15,7 +15,7 @@ import (
"github.com/ipld/go-car"
carutil "github.com/ipld/go-car/util"
carv2 "github.com/ipld/go-car/v2"
- mh "github.com/multiformats/go-multihash"
+ "github.com/multiformats/go-multicodec"
cbg "github.com/whyrusleeping/cbor-gen"
"go.uber.org/atomic"
"golang.org/x/sync/errgroup"
@@ -369,14 +369,16 @@ func (s *walkScheduler) Wait() error {
}
func (s *walkScheduler) enqueueIfNew(task walkTask) {
- if task.c.Prefix().MhType == mh.IDENTITY {
+ if multicodec.Code(task.c.Prefix().MhType) == multicodec.Identity {
//log.Infow("ignored", "cid", todo.c.String())
return
}
- // This lets through RAW and CBOR blocks, the only two types that we
- // end up writing to the exported CAR.
- if task.c.Prefix().Codec != cid.Raw && task.c.Prefix().Codec != cid.DagCBOR {
+ // This lets through RAW, CBOR, and DagCBOR blocks, the only types that we end up writing to
+ // the exported CAR.
+ switch multicodec.Code(task.c.Prefix().Codec) {
+ case multicodec.Cbor, multicodec.DagCbor, multicodec.Raw:
+ default:
//log.Infow("ignored", "cid", todo.c.String())
return
}
@@ -450,7 +452,8 @@ func (s *walkScheduler) processTask(t walkTask, workerN int) error {
// We exported the ipld block. If it wasn't a CBOR block, there's nothing
// else to do and we can bail out early as it won't have any links
// etc.
- if t.c.Prefix().Codec != cid.DagCBOR || t.c.Prefix().MhType == mh.IDENTITY {
+ if multicodec.Code(t.c.Prefix().Codec) != multicodec.DagCbor ||
+ multicodec.Code(t.c.Prefix().MhType) == multicodec.Identity {
return nil
}
@@ -683,14 +686,13 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe
prefix := c.Prefix()
// Don't include identity CIDs.
- if prefix.MhType == mh.IDENTITY {
+ if multicodec.Code(prefix.MhType) == multicodec.Identity {
continue
}
- // We only include raw and dagcbor, for now.
- // Raw for "code" CIDs.
- switch prefix.Codec {
- case cid.Raw, cid.DagCBOR:
+ // We only include raw, cbor, and dagcbor, for now.
+ switch multicodec.Code(prefix.Codec) {
+ case multicodec.Cbor, multicodec.DagCbor, multicodec.Raw:
default:
continue
}
@@ -722,7 +724,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe
}
func recurseLinks(ctx context.Context, bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) {
- if root.Prefix().Codec != cid.DagCBOR {
+ if multicodec.Code(root.Prefix().Codec) != multicodec.DagCbor {
return in, nil
}
diff --git a/chain/store/store.go b/chain/store/store.go
index 88103ac48..f2826fc2f 100644
--- a/chain/store/store.go
+++ b/chain/store/store.go
@@ -11,7 +11,7 @@ import (
"sync"
"time"
- lru "github.com/hashicorp/golang-lru/v2"
+ "github.com/hashicorp/golang-lru/arc/v2"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
dstore "github.com/ipfs/go-datastore"
@@ -120,8 +120,8 @@ type ChainStore struct {
reorgCh chan<- reorg
reorgNotifeeCh chan ReorgNotifee
- mmCache *lru.ARCCache[cid.Cid, mmCids]
- tsCache *lru.ARCCache[types.TipSetKey, *types.TipSet]
+ mmCache *arc.ARCCache[cid.Cid, mmCids]
+ tsCache *arc.ARCCache[types.TipSetKey, *types.TipSet]
evtTypes [1]journal.EventType
journal journal.Journal
@@ -133,8 +133,8 @@ type ChainStore struct {
}
func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, weight WeightFunc, j journal.Journal) *ChainStore {
- c, _ := lru.NewARC[cid.Cid, mmCids](DefaultMsgMetaCacheSize)
- tsc, _ := lru.NewARC[types.TipSetKey, *types.TipSet](DefaultTipSetCacheSize)
+ c, _ := arc.NewARC[cid.Cid, mmCids](DefaultMsgMetaCacheSize)
+ tsc, _ := arc.NewARC[types.TipSetKey, *types.TipSet](DefaultTipSetCacheSize)
if j == nil {
j = journal.NilJournal()
}
@@ -367,49 +367,32 @@ func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid)
func (cs *ChainStore) SetGenesis(ctx context.Context, b *types.BlockHeader) error {
ts, err := types.NewTipSet([]*types.BlockHeader{b})
if err != nil {
- return err
+ return xerrors.Errorf("failed to construct genesis tipset: %w", err)
}
- if err := cs.PutTipSet(ctx, ts); err != nil {
- return err
+ if err := cs.PersistTipsets(ctx, []*types.TipSet{ts}); err != nil {
+ return xerrors.Errorf("failed to persist genesis tipset: %w", err)
+ }
+
+ if err := cs.AddToTipSetTracker(ctx, b); err != nil {
+ return xerrors.Errorf("failed to add genesis tipset to tracker: %w", err)
+ }
+
+ if err := cs.RefreshHeaviestTipSet(ctx, ts.Height()); err != nil {
+ return xerrors.Errorf("failed to put genesis tipset: %w", err)
}
return cs.metadataDs.Put(ctx, dstore.NewKey("0"), b.Cid().Bytes())
}
-func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
- if err := cs.PersistTipsets(ctx, []*types.TipSet{ts}); err != nil {
- return xerrors.Errorf("failed to persist tipset: %w", err)
- }
-
- expanded, err := cs.expandTipset(ctx, ts.Blocks()[0])
- if err != nil {
- return xerrors.Errorf("errored while expanding tipset: %w", err)
- }
-
- if expanded.Key() != ts.Key() {
- log.Debugf("expanded %s into %s\n", ts.Cids(), expanded.Cids())
-
- tsBlk, err := expanded.Key().ToStorageBlock()
- if err != nil {
- return xerrors.Errorf("failed to get tipset key block: %w", err)
- }
-
- if err = cs.chainLocalBlockstore.Put(ctx, tsBlk); err != nil {
- return xerrors.Errorf("failed to put tipset key block: %w", err)
- }
- }
-
- if err := cs.MaybeTakeHeavierTipSet(ctx, expanded); err != nil {
- return xerrors.Errorf("MaybeTakeHeavierTipSet failed in PutTipSet: %w", err)
- }
- return nil
-}
-
-// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our
-// internal state as our new head, if and only if it is heavier than the current
-// head and does not exceed the maximum fork length.
-func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error {
+// RefreshHeaviestTipSet receives a newTsHeight at which a new tipset might exist. It then:
+// - "refreshes" the heaviest tipset that can be formed at its current heaviest height
+// - if equivocation is detected among the miners of the current heaviest tipset, the head is immediately updated to the heaviest tipset that can be formed in a range of 5 epochs
+//
+// - forms the best tipset that can be formed at the _input_ height
+// - compares the three tipset weights: "current" heaviest tipset, "refreshed" tipset, and best tipset at newTsHeight
+// - updates "current" heaviest to the heaviest of those 3 tipsets (if an update is needed), assuming it doesn't violate the maximum fork rule
+func (cs *ChainStore) RefreshHeaviestTipSet(ctx context.Context, newTsHeight abi.ChainEpoch) error {
for {
cs.heaviestLk.Lock()
if len(cs.reorgCh) < reorgChBuf/2 {
@@ -426,39 +409,90 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
defer cs.heaviestLk.Unlock()
- if ts.Equals(cs.heaviest) {
+ heaviestWeight, err := cs.weight(ctx, cs.StateBlockstore(), cs.heaviest)
+ if err != nil {
+ return xerrors.Errorf("failed to calculate currentHeaviest's weight: %w", err)
+ }
+
+ heaviestHeight := abi.ChainEpoch(0)
+ if cs.heaviest != nil {
+ heaviestHeight = cs.heaviest.Height()
+ }
+
+ // Before we look at newTs, let's refresh best tipset at current head's height -- this is done to detect equivocation
+ newHeaviest, newHeaviestWeight, err := cs.FormHeaviestTipSetForHeight(ctx, heaviestHeight)
+ if err != nil {
+ return xerrors.Errorf("failed to reform head at same height: %w", err)
+ }
+
+ // Equivocation has occurred! We need a new head NOW!
+ if newHeaviest == nil || newHeaviestWeight.LessThan(heaviestWeight) {
+ log.Warnf("chainstore heaviest tipset's weight SHRANK from %d (%s) to %d (%s) due to equivocation", heaviestWeight, cs.heaviest, newHeaviestWeight, newHeaviest)
+ // Unfortunately, we don't know what the right height to form a new heaviest tipset is.
+ // It is _probably_, but not _necessarily_, heaviestHeight.
+ // So, we need to explore a range of epochs, finding the heaviest tipset in that range.
+ // We thus try to form the heaviest tipset for 5 epochs above heaviestHeight (most of which will likely not exist),
+ // as well as for 5 below.
+ // This is slow, but we expect to almost-never be here (only if miners are equivocating, which carries a hefty penalty).
+ for i := heaviestHeight + 5; i > heaviestHeight-5; i-- {
+ possibleHeaviestTs, possibleHeaviestWeight, err := cs.FormHeaviestTipSetForHeight(ctx, i)
+ if err != nil {
+ return xerrors.Errorf("failed to produce head at height %d: %w", i, err)
+ }
+
+ if possibleHeaviestWeight.GreaterThan(newHeaviestWeight) {
+ newHeaviestWeight = possibleHeaviestWeight
+ newHeaviest = possibleHeaviestTs
+ }
+ }
+
+ // if we've found something, we know it's the heaviest equivocation-free head, take it IMMEDIATELY
+ if newHeaviest != nil {
+ errTake := cs.takeHeaviestTipSet(ctx, newHeaviest)
+ if errTake != nil {
+ return xerrors.Errorf("failed to take newHeaviest tipset as head: %w", err)
+ }
+ } else {
+ // if we haven't found something, just stay with our equivocation-y head
+ newHeaviest = cs.heaviest
+ }
+ }
+
+ // if the new height we were notified about isn't what we just refreshed at, see if we have a heavier tipset there
+ if newTsHeight != newHeaviest.Height() {
+ bestTs, bestTsWeight, err := cs.FormHeaviestTipSetForHeight(ctx, newTsHeight)
+ if err != nil {
+ return xerrors.Errorf("failed to form new heaviest tipset at height %d: %w", newTsHeight, err)
+ }
+
+ heavier := bestTsWeight.GreaterThan(newHeaviestWeight)
+ if bestTsWeight.Equals(newHeaviestWeight) {
+ heavier = breakWeightTie(bestTs, newHeaviest)
+ }
+
+ if heavier {
+ newHeaviest = bestTs
+ }
+ }
+
+ // Everything's the same as before, exit early
+ if newHeaviest.Equals(cs.heaviest) {
return nil
}
- w, err := cs.weight(ctx, cs.StateBlockstore(), ts)
+ // At this point, it MUST be true that newHeaviest is heavier than cs.heaviest -- update if fork allows
+ exceeds, err := cs.exceedsForkLength(ctx, cs.heaviest, newHeaviest)
if err != nil {
- return err
+ return xerrors.Errorf("failed to check fork length: %w", err)
}
- heaviestW, err := cs.weight(ctx, cs.StateBlockstore(), cs.heaviest)
+
+ if exceeds {
+ return nil
+ }
+
+ err = cs.takeHeaviestTipSet(ctx, newHeaviest)
if err != nil {
- return err
- }
-
- heavier := w.GreaterThan(heaviestW)
- if w.Equals(heaviestW) && !ts.Equals(cs.heaviest) {
- log.Errorw("weight draw", "currTs", cs.heaviest, "ts", ts)
- heavier = breakWeightTie(ts, cs.heaviest)
- }
-
- if heavier {
- // TODO: don't do this for initial sync. Now that we don't have a
- // difference between 'bootstrap sync' and 'caught up' sync, we need
- // some other heuristic.
-
- exceeds, err := cs.exceedsForkLength(ctx, cs.heaviest, ts)
- if err != nil {
- return err
- }
- if exceeds {
- return nil
- }
-
- return cs.takeHeaviestTipSet(ctx, ts)
+ return xerrors.Errorf("failed to take heaviest tipset: %w", err)
}
return nil
@@ -655,6 +689,16 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet)
return err
}
+ // write the tipsetkey block to the blockstore for EthAPI queries
+ tsBlk, err := ts.Key().ToStorageBlock()
+ if err != nil {
+ return xerrors.Errorf("failed to get tipset key block: %w", err)
+ }
+
+ if err = cs.chainLocalBlockstore.Put(ctx, tsBlk); err != nil {
+ return xerrors.Errorf("failed to put tipset key block: %w", err)
+ }
+
if prevHeaviest != nil { // buf
if len(cs.reorgCh) > 0 {
log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh))
@@ -904,6 +948,14 @@ func ReorgOps(ctx context.Context, lts func(ctx context.Context, _ types.TipSetK
var leftChain, rightChain []*types.TipSet
for !left.Equals(right) {
+ // this can take a long time and lot of memory if the tipsets are far apart
+ // since it can be reached through remote calls, we need to
+ // cancel early when possible to prevent resource exhaustion.
+ select {
+ case <-ctx.Done():
+ return nil, nil, ctx.Err()
+ default:
+ }
if left.Height() > right.Height() {
leftChain = append(leftChain, left)
par, err := lts(ctx, left.Parents())
@@ -960,7 +1012,7 @@ func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHead
// This means that we ideally want to keep only most recent 900 epochs in here
// Golang's map iteration starts at a random point in a map.
// With 5 tries per epoch, and 900 entries to keep, on average we will have
- // ~136 garbage entires in the `cs.tipsets` map. (solve for 1-(1-x/(900+x))^5 == 0.5)
+ // ~136 garbage entries in the `cs.tipsets` map. (solve for 1-(1-x/(900+x))^5 == 0.5)
// Seems good enough to me
for height := range cs.tipsets {
@@ -975,6 +1027,7 @@ func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHead
return nil
}
+// PersistTipsets writes the provided blocks and the TipSetKey objects to the blockstore
func (cs *ChainStore) PersistTipsets(ctx context.Context, tipsets []*types.TipSet) error {
toPersist := make([]*types.BlockHeader, 0, len(tipsets)*int(build.BlocksPerEpoch))
tsBlks := make([]block.Block, 0, len(tipsets))
@@ -1027,44 +1080,72 @@ func (cs *ChainStore) persistBlockHeaders(ctx context.Context, b ...*types.Block
return err
}
-func (cs *ChainStore) expandTipset(ctx context.Context, b *types.BlockHeader) (*types.TipSet, error) {
- // Hold lock for the whole function for now, if it becomes a problem we can
- // fix pretty easily
+// FormHeaviestTipSetForHeight looks up all valid blocks at a given height, and returns the heaviest tipset that can be made at that height
+// It does not consider ANY blocks from miners that have "equivocated" (produced 2 blocks at the same height)
+func (cs *ChainStore) FormHeaviestTipSetForHeight(ctx context.Context, height abi.ChainEpoch) (*types.TipSet, types.BigInt, error) {
cs.tstLk.Lock()
defer cs.tstLk.Unlock()
- all := []*types.BlockHeader{b}
-
- tsets, ok := cs.tipsets[b.Height]
+ blockCids, ok := cs.tipsets[height]
if !ok {
- return types.NewTipSet(all)
+ return nil, types.NewInt(0), nil
}
- inclMiners := map[address.Address]cid.Cid{b.Miner: b.Cid()}
- for _, bhc := range tsets {
- if bhc == b.Cid() {
- continue
- }
+ // First, identify "bad" miners for the height
+ seenMiners := map[address.Address]struct{}{}
+ badMiners := map[address.Address]struct{}{}
+ blocks := make([]*types.BlockHeader, 0, len(blockCids))
+ for _, bhc := range blockCids {
h, err := cs.GetBlock(ctx, bhc)
if err != nil {
- return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
+ return nil, types.NewInt(0), xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
}
- if cid, found := inclMiners[h.Miner]; found {
- log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", h.Miner, h.Height, h.Cid(), cid)
+ if _, seen := seenMiners[h.Miner]; seen {
+ badMiners[h.Miner] = struct{}{}
continue
}
+ seenMiners[h.Miner] = struct{}{}
+ blocks = append(blocks, h)
+ }
- if types.CidArrsEqual(h.Parents, b.Parents) {
- all = append(all, h)
- inclMiners[h.Miner] = bhc
+ // Next, group by parent tipset
+
+ formableTipsets := make(map[types.TipSetKey][]*types.BlockHeader, 0)
+ for _, h := range blocks {
+ if _, bad := badMiners[h.Miner]; bad {
+ continue
+ }
+ ptsk := types.NewTipSetKey(h.Parents...)
+ formableTipsets[ptsk] = append(formableTipsets[ptsk], h)
+ }
+
+ maxWeight := types.NewInt(0)
+ var maxTs *types.TipSet
+ for _, headers := range formableTipsets {
+ ts, err := types.NewTipSet(headers)
+ if err != nil {
+ return nil, types.NewInt(0), xerrors.Errorf("unexpected error forming tipset: %w", err)
+ }
+
+ weight, err := cs.Weight(ctx, ts)
+ if err != nil {
+ return nil, types.NewInt(0), xerrors.Errorf("failed to calculate weight: %w", err)
+ }
+
+ heavier := weight.GreaterThan(maxWeight)
+ if weight.Equals(maxWeight) {
+ heavier = breakWeightTie(ts, maxTs)
+ }
+
+ if heavier {
+ maxWeight = weight
+ maxTs = ts
}
}
- // TODO: other validation...?
-
- return types.NewTipSet(all)
+ return maxTs, maxWeight, nil
}
func (cs *ChainStore) GetGenesis(ctx context.Context) (*types.BlockHeader, error) {
diff --git a/chain/store/store_test.go b/chain/store/store_test.go
index cea0fdc2a..9c717fdbe 100644
--- a/chain/store/store_test.go
+++ b/chain/store/store_test.go
@@ -10,6 +10,7 @@ import (
"github.com/ipfs/go-datastore"
"github.com/stretchr/testify/require"
+ "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
@@ -238,3 +239,171 @@ func TestChainExportImportFull(t *testing.T) {
}
}
}
+
+func TestEquivocations(t *testing.T) {
+ ctx := context.Background()
+ cg, err := gen.NewGenerator()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var last *types.TipSet
+ for i := 0; i < 10; i++ {
+ ts, err := cg.NextTipSet()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ last = ts.TipSet.TipSet()
+ }
+
+ mTs, err := cg.NextTipSetFromMiners(last, []address.Address{last.Blocks()[0].Miner}, 0)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(mTs.TipSet.TipSet().Cids()))
+ last = mTs.TipSet.TipSet()
+
+ require.NotEmpty(t, last.Blocks())
+ blk1 := *last.Blocks()[0]
+
+ // quick check: asking to form tipset at latest height just returns head
+ bestHead, bestHeadWeight, err := cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
+ require.NoError(t, err)
+ require.Equal(t, last.Key(), bestHead.Key())
+ require.Contains(t, last.Cids(), blk1.Cid())
+ expectedWeight, err := cg.ChainStore().Weight(ctx, bestHead)
+ require.NoError(t, err)
+ require.Equal(t, expectedWeight, bestHeadWeight)
+
+ // add another block by a different miner -- it should get included in the best tipset
+ blk2 := blk1
+ blk1Miner, err := address.IDFromAddress(blk2.Miner)
+ require.NoError(t, err)
+ blk2.Miner, err = address.NewIDAddress(blk1Miner + 50)
+ require.NoError(t, err)
+ addBlockToTracker(t, cg.ChainStore(), &blk2)
+
+ bestHead, bestHeadWeight, err = cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
+ require.NoError(t, err)
+ for _, blkCid := range last.Cids() {
+ require.Contains(t, bestHead.Cids(), blkCid)
+ }
+ require.Contains(t, bestHead.Cids(), blk2.Cid())
+ expectedWeight, err = cg.ChainStore().Weight(ctx, bestHead)
+ require.NoError(t, err)
+ require.Equal(t, expectedWeight, bestHeadWeight)
+
+ // add another block by a different miner, but on a different tipset -- it should NOT get included
+ blk3 := blk1
+ blk3.Miner, err = address.NewIDAddress(blk1Miner + 100)
+ require.NoError(t, err)
+ blk1Parent, err := cg.ChainStore().GetBlock(ctx, blk3.Parents[0])
+ require.NoError(t, err)
+ blk3.Parents = blk1Parent.Parents
+ addBlockToTracker(t, cg.ChainStore(), &blk3)
+
+ bestHead, bestHeadWeight, err = cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
+ require.NoError(t, err)
+ for _, blkCid := range last.Cids() {
+ require.Contains(t, bestHead.Cids(), blkCid)
+ }
+ require.Contains(t, bestHead.Cids(), blk2.Cid())
+ require.NotContains(t, bestHead.Cids(), blk3.Cid())
+ expectedWeight, err = cg.ChainStore().Weight(ctx, bestHead)
+ require.NoError(t, err)
+ require.Equal(t, expectedWeight, bestHeadWeight)
+
+ // add another block by the same miner as blk1 -- it should NOT get included, and blk1 should be excluded too
+ blk4 := blk1
+ blk4.Timestamp = blk1.Timestamp + 1
+ addBlockToTracker(t, cg.ChainStore(), &blk4)
+
+ bestHead, bestHeadWeight, err = cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
+ require.NoError(t, err)
+ for _, blkCid := range last.Cids() {
+ if blkCid != blk1.Cid() {
+ require.Contains(t, bestHead.Cids(), blkCid)
+ }
+ }
+ require.NotContains(t, bestHead.Cids(), blk4.Cid())
+ require.NotContains(t, bestHead.Cids(), blk1.Cid())
+ expectedWeight, err = cg.ChainStore().Weight(ctx, bestHead)
+ require.NoError(t, err)
+ require.Equal(t, expectedWeight, bestHeadWeight)
+
+ // check that after all of that, the chainstore's head has NOT changed
+ require.Equal(t, last.Key(), cg.ChainStore().GetHeaviestTipSet().Key())
+
+ // NOW, after all that, notify the chainstore to refresh its head
+ require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blk1.Height+1))
+
+ originalHead := *last
+ newHead := cg.ChainStore().GetHeaviestTipSet()
+ // the newHead should be at the same height as the originalHead
+ require.Equal(t, originalHead.Height(), newHead.Height())
+ // the newHead should NOT be the same as the originalHead
+ require.NotEqual(t, originalHead.Key(), newHead.Key())
+ // specifically, it should not contain any blocks by blk1Miner
+ for _, b := range newHead.Blocks() {
+ require.NotEqual(t, blk1.Miner, b.Miner)
+ }
+
+ // now have blk2's Miner equivocate too! this causes us to switch to a tipset with a different parent!
+ blk5 := blk2
+ blk5.Timestamp = blk5.Timestamp + 1
+ addBlockToTracker(t, cg.ChainStore(), &blk5)
+
+ // notify the chainstore to refresh its head
+ require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blk1.Height+1))
+ newHead = cg.ChainStore().GetHeaviestTipSet()
+ // the newHead should still be at the same height as the originalHead
+ require.Equal(t, originalHead.Height(), newHead.Height())
+ // BUT it should no longer have the same parents -- only blk3's miner is good, and they mined on a different tipset
+ require.Equal(t, 1, len(newHead.Blocks()))
+ require.Equal(t, blk3.Cid(), newHead.Cids()[0])
+ require.NotEqual(t, originalHead.Parents(), newHead.Parents())
+
+ // now have blk3's Miner equivocate too! this causes us to switch to a previous epoch entirely :(
+ blk6 := blk3
+ blk6.Timestamp = blk6.Timestamp + 1
+ addBlockToTracker(t, cg.ChainStore(), &blk6)
+
+ // trying to form a tipset at our previous height leads to emptiness
+ tryTs, tryTsWeight, err := cg.ChainStore().FormHeaviestTipSetForHeight(ctx, blk1.Height)
+ require.NoError(t, err)
+ require.Nil(t, tryTs)
+ require.True(t, tryTsWeight.IsZero())
+
+ // notify the chainstore to refresh its head
+ require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blk1.Height+1))
+ newHead = cg.ChainStore().GetHeaviestTipSet()
+ // the newHead should now be one epoch behind originalHead
+ require.Greater(t, originalHead.Height(), newHead.Height())
+
+ // next, we create a new tipset with only one block after many null rounds
+ headAfterNulls, err := cg.NextTipSetFromMiners(newHead, []address.Address{newHead.Blocks()[0].Miner}, 15)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(headAfterNulls.TipSet.Blocks))
+
+ // now, we disqualify the block in this tipset because of equivocation
+ blkAfterNulls := headAfterNulls.TipSet.TipSet().Blocks()[0]
+ equivocatedBlkAfterNulls := *blkAfterNulls
+ equivocatedBlkAfterNulls.Timestamp = blkAfterNulls.Timestamp + 1
+ addBlockToTracker(t, cg.ChainStore(), &equivocatedBlkAfterNulls)
+
+ // try to form a tipset at this height -- it should be empty
+ tryTs2, tryTsWeight2, err := cg.ChainStore().FormHeaviestTipSetForHeight(ctx, blkAfterNulls.Height)
+ require.NoError(t, err)
+ require.Nil(t, tryTs2)
+ require.True(t, tryTsWeight2.IsZero())
+
+ // now we "notify" at this height -- it should lead to no head change because there's no formable head in near epochs
+ require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blkAfterNulls.Height))
+ require.True(t, headAfterNulls.TipSet.TipSet().Equals(cg.ChainStore().GetHeaviestTipSet()))
+}
+
+func addBlockToTracker(t *testing.T, cs *store.ChainStore, blk *types.BlockHeader) {
+ blk2Ts, err := types.NewTipSet([]*types.BlockHeader{blk})
+ require.NoError(t, err)
+ require.NoError(t, cs.PersistTipsets(context.TODO(), []*types.TipSet{blk2Ts}))
+ require.NoError(t, cs.AddToTipSetTracker(context.TODO(), blk))
+}
diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go
index a7c0bee57..3a11f7c98 100644
--- a/chain/sub/incoming.go
+++ b/chain/sub/incoming.go
@@ -350,6 +350,7 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
)
recordFailure(ctx, metrics.MessageValidationFailure, "add")
switch {
+
case xerrors.Is(err, messagepool.ErrSoftValidationFailure):
fallthrough
case xerrors.Is(err, messagepool.ErrRBFTooLowPremium):
@@ -362,8 +363,17 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
fallthrough
case xerrors.Is(err, messagepool.ErrNonceTooLow):
fallthrough
+ case xerrors.Is(err, messagepool.ErrNotEnoughFunds):
+ fallthrough
case xerrors.Is(err, messagepool.ErrExistingNonce):
return pubsub.ValidationIgnore
+
+ case xerrors.Is(err, messagepool.ErrMessageTooBig):
+ fallthrough
+ case xerrors.Is(err, messagepool.ErrMessageValueTooHigh):
+ fallthrough
+ case xerrors.Is(err, messagepool.ErrInvalidToAddr):
+ fallthrough
default:
return pubsub.ValidationReject
}
@@ -519,9 +529,8 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
msgCid := idxrMsg.Cid
- var msgInfo *peerMsgInfo
- msgInfo, ok := v.peerCache.Get(minerAddr)
- if !ok {
+ msgInfo, cached := v.peerCache.Get(minerAddr)
+ if !cached {
msgInfo = &peerMsgInfo{}
}
@@ -529,17 +538,17 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
msgInfo.mutex.Lock()
defer msgInfo.mutex.Unlock()
- if ok {
+ var seqno uint64
+ if cached {
// Reject replayed messages.
- seqno := binary.BigEndian.Uint64(msg.Message.GetSeqno())
+ seqno = binary.BigEndian.Uint64(msg.Message.GetSeqno())
if seqno <= msgInfo.lastSeqno {
log.Debugf("ignoring replayed indexer message")
return pubsub.ValidationIgnore
}
- msgInfo.lastSeqno = seqno
}
- if !ok || originPeer != msgInfo.peerID {
+ if !cached || originPeer != msgInfo.peerID {
// Check that the miner ID maps to the peer that sent the message.
err = v.authenticateMessage(ctx, minerAddr, originPeer)
if err != nil {
@@ -548,7 +557,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
return pubsub.ValidationReject
}
msgInfo.peerID = originPeer
- if !ok {
+ if !cached {
// Add msgInfo to cache only after being authenticated. If two
// messages from the same peer are handled concurrently, there is a
// small chance that one msgInfo could replace the other here when
@@ -557,6 +566,9 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
}
}
+ // Update message info cache with the latest message's sequence number.
+ msgInfo.lastSeqno = seqno
+
// See if message needs to be ignored due to rate limiting.
if v.rateLimitPeer(msgInfo, msgCid) {
return pubsub.ValidationIgnore
diff --git a/chain/sub/incoming_test.go b/chain/sub/incoming_test.go
index f54e09049..d8ee99b7f 100644
--- a/chain/sub/incoming_test.go
+++ b/chain/sub/incoming_test.go
@@ -12,10 +12,12 @@ import (
"github.com/ipni/go-libipni/announce/message"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pb "github.com/libp2p/go-libp2p-pubsub/pb"
+ "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/mocks"
"github.com/filecoin-project/lotus/chain/types"
)
@@ -134,3 +136,123 @@ func TestIndexerMessageValidator_Validate(t *testing.T) {
})
}
}
+
+func TestIdxValidator(t *testing.T) {
+ validCid, err := cid.Decode("QmbpDgg5kRLDgMxS8vPKNFXEcA6D5MC4CkuUdSWDVtHPGK")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ addr, err := address.NewFromString("f01024")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ buf1, err := addr.MarshalBinary()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ selfPID := "12D3KooWQiCbqEStCkdqUvr69gQsrp9urYJZUCkzsQXia7mbqbFW"
+ senderPID := "12D3KooWE8yt84RVwW3sFcd6WMjbUdWrZer2YtT4dmtj3dHdahSZ"
+ extraData := buf1
+
+ mc := gomock.NewController(t)
+ node := mocks.NewMockFullNode(mc)
+ node.EXPECT().ChainHead(gomock.Any()).Return(nil, nil).AnyTimes()
+
+ subject := NewIndexerMessageValidator(peer.ID(selfPID), node, node)
+ message := message.Message{
+ Cid: validCid,
+ Addrs: nil,
+ ExtraData: extraData,
+ }
+ buf := bytes.NewBuffer(nil)
+ if err := message.MarshalCBOR(buf); err != nil {
+ t.Fatal(err)
+ }
+
+ topic := "topic"
+
+ privk, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ id, err := peer.IDFromPublicKey(privk.GetPublic())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ node.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(api.MinerInfo{PeerId: &id}, nil).AnyTimes()
+
+ pbm := &pb.Message{
+ Data: buf.Bytes(),
+ Topic: &topic,
+ From: []byte(id),
+ Seqno: []byte{1, 1, 1, 1, 2, 2, 2, 2},
+ }
+ validate := subject.Validate(context.Background(), peer.ID(senderPID), &pubsub.Message{
+ Message: pbm,
+ ReceivedFrom: peer.ID("f01024"), // peer.ID(senderPID),
+ ValidatorData: nil,
+ })
+ if validate != pubsub.ValidationAccept {
+ t.Error("Expected to receive ValidationAccept")
+ }
+ msgInfo, cached := subject.peerCache.Get(addr)
+ if !cached {
+ t.Fatal("Message info should be in cache")
+ }
+ seqno := msgInfo.lastSeqno
+ msgInfo.rateLimit = nil // prevent interference from rate limiting
+
+ t.Log("Sending DoS msg")
+ privk, _, err = crypto.GenerateKeyPair(crypto.RSA, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ id2, err := peer.IDFromPublicKey(privk.GetPublic())
+ if err != nil {
+ t.Fatal(err)
+ }
+ pbm = &pb.Message{
+ Data: buf.Bytes(),
+ Topic: &topic,
+ From: []byte(id2),
+ Seqno: []byte{255, 255, 255, 255, 255, 255, 255, 255},
+ }
+ validate = subject.Validate(context.Background(), peer.ID(senderPID), &pubsub.Message{
+ Message: pbm,
+ ReceivedFrom: peer.ID(senderPID),
+ ValidatorData: nil,
+ })
+ if validate != pubsub.ValidationReject {
+ t.Error("Expected to get ValidationReject")
+ }
+ msgInfo, cached = subject.peerCache.Get(addr)
+ if !cached {
+ t.Fatal("Message info should be in cache")
+ }
+ msgInfo.rateLimit = nil // prevent interference from rate limiting
+
+ // Check if DoS is possible.
+ if msgInfo.lastSeqno != seqno {
+ t.Fatal("Sequence number should not have been updated")
+ }
+
+ t.Log("Sending another valid message from miner...")
+ pbm = &pb.Message{
+ Data: buf.Bytes(),
+ Topic: &topic,
+ From: []byte(id),
+ Seqno: []byte{1, 1, 1, 1, 2, 2, 2, 3},
+ }
+ validate = subject.Validate(context.Background(), peer.ID(senderPID), &pubsub.Message{
+ Message: pbm,
+ ReceivedFrom: peer.ID("f01024"), // peer.ID(senderPID),
+ ValidatorData: nil,
+ })
+ if validate != pubsub.ValidationAccept {
+ t.Fatal("Did not receive ValidationAccept")
+ }
+}
diff --git a/chain/sub/ratelimit/queue_test.go b/chain/sub/ratelimit/queue_test.go
new file mode 100644
index 000000000..cd66a423e
--- /dev/null
+++ b/chain/sub/ratelimit/queue_test.go
@@ -0,0 +1,61 @@
+package ratelimit
+
+import (
+ "testing"
+)
+
+func TestQueue(t *testing.T) {
+ const size = 3
+ q := &queue{buf: make([]int64, size)}
+
+ if q.len() != 0 {
+ t.Fatalf("q.len() = %d, expect 0", q.len())
+ }
+
+ if q.cap() != size {
+ t.Fatalf("q.cap() = %d, expect %d", q.cap(), size)
+ }
+
+ for i := int64(0); i < int64(size); i++ {
+ err := q.push(i)
+ if err != nil {
+ t.Fatalf("cannot push element %d", i)
+ }
+ }
+
+ if q.len() != size {
+ t.Fatalf("q.len() = %d, expect %d", q.len(), size)
+ }
+
+ err := q.push(int64(size))
+ if err != ErrRateLimitExceeded {
+ t.Fatalf("pushing element beyond capacity should have failed with err: %s, got %s", ErrRateLimitExceeded, err)
+ }
+
+ if q.front() != 0 {
+ t.Fatalf("q.front() = %d, expect 0", q.front())
+ }
+
+ if q.back() != int64(size-1) {
+ t.Fatalf("q.back() = %d, expect %d", q.back(), size-1)
+ }
+
+ popVal := q.pop()
+ if popVal != 0 {
+ t.Fatalf("q.pop() = %d, expect 0", popVal)
+ }
+
+ if q.len() != size-1 {
+ t.Fatalf("q.len() = %d, expect %d", q.len(), size-1)
+ }
+
+ // Testing truncation.
+ threshold := int64(1)
+ q.truncate(threshold)
+ if q.len() != 1 {
+ t.Fatalf("q.len() after truncate = %d, expect 1", q.len())
+ }
+ if q.front() != 2 {
+ t.Fatalf("q.front() after truncate = %d, expect 2", q.front())
+ }
+}
diff --git a/chain/sync.go b/chain/sync.go
index 7830a9771..6341deeeb 100644
--- a/chain/sync.go
+++ b/chain/sync.go
@@ -536,7 +536,7 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
// At this point we have accepted and synced to the new `maybeHead`
// (`StageSyncComplete`).
- if err := syncer.store.PutTipSet(ctx, maybeHead); err != nil {
+ if err := syncer.store.RefreshHeaviestTipSet(ctx, maybeHead.Height()); err != nil {
span.AddAttributes(trace.StringAttribute("put_error", err.Error()))
span.SetStatus(trace.Status{
Code: 13,
diff --git a/chain/sync_manager.go b/chain/sync_manager.go
index 94017c276..3369c3b5a 100644
--- a/chain/sync_manager.go
+++ b/chain/sync_manager.go
@@ -92,6 +92,7 @@ type syncManager struct {
var _ SyncManager = (*syncManager)(nil)
type peerHead struct {
+ // Note: this doesn't _necessarily_ mean that p's head is ts, just that ts is a tipset that p sent to us
p peer.ID
ts *types.TipSet
}
diff --git a/chain/sync_test.go b/chain/sync_test.go
index a86d42f17..be7759603 100644
--- a/chain/sync_test.go
+++ b/chain/sync_test.go
@@ -11,7 +11,6 @@ import (
"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
logging "github.com/ipfs/go-log/v2"
- "github.com/libp2p/go-libp2p/core/peer"
mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
"github.com/stretchr/testify/require"
@@ -311,7 +310,7 @@ func (tu *syncTestUtil) addSourceNode(gen int) {
for _, lastB := range lastTs.Blocks {
require.NoError(tu.t, cs.AddToTipSetTracker(context.Background(), lastB.Header))
}
- err = cs.PutTipSet(tu.ctx, lastTs.TipSet())
+ err = cs.RefreshHeaviestTipSet(tu.ctx, lastTs.TipSet().Height())
require.NoError(tu.t, err)
tu.genesis = genesis
@@ -344,13 +343,6 @@ func (tu *syncTestUtil) addClientNode() int {
return len(tu.nds) - 1
}
-func (tu *syncTestUtil) pid(n int) peer.ID {
- nal, err := tu.nds[n].NetAddrsListen(tu.ctx)
- require.NoError(tu.t, err)
-
- return nal.ID
-}
-
func (tu *syncTestUtil) connect(from, to int) {
toPI, err := tu.nds[to].NetAddrsListen(tu.ctx)
require.NoError(tu.t, err)
diff --git a/chain/types/blockmsg_test.go b/chain/types/blockmsg_test.go
index 02a622768..ea20f64a7 100644
--- a/chain/types/blockmsg_test.go
+++ b/chain/types/blockmsg_test.go
@@ -7,9 +7,6 @@ import (
)
func TestDecodeBlockMsg(t *testing.T) {
- type args struct {
- b []byte
- }
tests := []struct {
name string
data []byte
diff --git a/chain/types/cbor_gen.go b/chain/types/cbor_gen.go
index 90d1a14c5..a9040613f 100644
--- a/chain/types/cbor_gen.go
+++ b/chain/types/cbor_gen.go
@@ -2289,7 +2289,7 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) {
return nil
}
-var lengthBufMessageTrace = []byte{134}
+var lengthBufMessageTrace = []byte{137}
func (t *MessageTrace) MarshalCBOR(w io.Writer) error {
if t == nil {
@@ -2343,6 +2343,23 @@ func (t *MessageTrace) MarshalCBOR(w io.Writer) error {
return err
}
+ // t.GasLimit (uint64) (uint64)
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.GasLimit)); err != nil {
+ return err
+ }
+
+ // t.ReadOnly (bool) (bool)
+ if err := cbg.WriteBool(w, t.ReadOnly); err != nil {
+ return err
+ }
+
+ // t.CodeCid (cid.Cid) (struct)
+
+ if err := cbg.WriteCid(cw, t.CodeCid); err != nil {
+ return xerrors.Errorf("failed to write cid field t.CodeCid: %w", err)
+ }
+
return nil
}
@@ -2365,7 +2382,7 @@ func (t *MessageTrace) UnmarshalCBOR(r io.Reader) (err error) {
return fmt.Errorf("cbor input should be of type array")
}
- if extra != 6 {
+ if extra != 9 {
return fmt.Errorf("cbor input had wrong number of fields")
}
@@ -2444,6 +2461,49 @@ func (t *MessageTrace) UnmarshalCBOR(r io.Reader) (err error) {
}
t.ParamsCodec = uint64(extra)
+ }
+ // t.GasLimit (uint64) (uint64)
+
+ {
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajUnsignedInt {
+ return fmt.Errorf("wrong type for uint64 field")
+ }
+ t.GasLimit = uint64(extra)
+
+ }
+ // t.ReadOnly (bool) (bool)
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajOther {
+ return fmt.Errorf("booleans must be major type 7")
+ }
+ switch extra {
+ case 20:
+ t.ReadOnly = false
+ case 21:
+ t.ReadOnly = true
+ default:
+ return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
+ }
+ // t.CodeCid (cid.Cid) (struct)
+
+ {
+
+ c, err := cbg.ReadCid(cr)
+ if err != nil {
+ return xerrors.Errorf("failed to read cid field t.CodeCid: %w", err)
+ }
+
+ t.CodeCid = c
+
}
return nil
}
diff --git a/chain/types/ethtypes/eth_types.go b/chain/types/ethtypes/eth_types.go
index 3e0dd8724..b796e6f56 100644
--- a/chain/types/ethtypes/eth_types.go
+++ b/chain/types/ethtypes/eth_types.go
@@ -18,6 +18,7 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
builtintypes "github.com/filecoin-project/go-state-types/builtin"
@@ -929,3 +930,57 @@ func (e *EthBlockNumberOrHash) UnmarshalJSON(b []byte) error {
return errors.New("invalid block param")
}
+
+type EthTrace struct {
+ Action EthTraceAction `json:"action"`
+ Result EthTraceResult `json:"result"`
+ Subtraces int `json:"subtraces"`
+ TraceAddress []int `json:"traceAddress"`
+ Type string `json:"type"`
+
+ Parent *EthTrace `json:"-"`
+
+ // if a subtrace makes a call to GetBytecode, we store a pointer to that subtrace here
+ // which we then lookup when checking for delegatecall (InvokeContractDelegate)
+ LastByteCode *EthTrace `json:"-"`
+}
+
+func (t *EthTrace) SetCallType(callType string) {
+ t.Action.CallType = callType
+ t.Type = callType
+}
+
+type EthTraceBlock struct {
+ *EthTrace
+ BlockHash EthHash `json:"blockHash"`
+ BlockNumber int64 `json:"blockNumber"`
+ TransactionHash EthHash `json:"transactionHash"`
+ TransactionPosition int `json:"transactionPosition"`
+}
+
+type EthTraceReplayBlockTransaction struct {
+ Output EthBytes `json:"output"`
+ StateDiff *string `json:"stateDiff"`
+ Trace []*EthTrace `json:"trace"`
+ TransactionHash EthHash `json:"transactionHash"`
+ VmTrace *string `json:"vmTrace"`
+}
+
+type EthTraceAction struct {
+ CallType string `json:"callType"`
+ From EthAddress `json:"from"`
+ To EthAddress `json:"to"`
+ Gas EthUint64 `json:"gas"`
+ Input EthBytes `json:"input"`
+ Value EthBigInt `json:"value"`
+
+ FilecoinMethod abi.MethodNum `json:"-"`
+ FilecoinCodeCid cid.Cid `json:"-"`
+ FilecoinFrom address.Address `json:"-"`
+ FilecoinTo address.Address `json:"-"`
+}
+
+type EthTraceResult struct {
+ GasUsed EthUint64 `json:"gasUsed"`
+ Output EthBytes `json:"output"`
+}
diff --git a/chain/types/ethtypes/rlp.go b/chain/types/ethtypes/rlp.go
index 049ea6fc4..15cee4a22 100644
--- a/chain/types/ethtypes/rlp.go
+++ b/chain/types/ethtypes/rlp.go
@@ -134,7 +134,7 @@ func decodeRLP(data []byte) (res interface{}, consumed int, err error) {
return nil, 0, err
}
totalLen := 1 + strLenInBytes + strLen
- if totalLen > len(data) {
+ if totalLen > len(data) || totalLen < 0 {
return nil, 0, xerrors.Errorf("invalid rlp data: out of bound while parsing string")
}
return data[1+strLenInBytes : totalLen], totalLen, nil
@@ -157,7 +157,12 @@ func decodeLength(data []byte, lenInBytes int) (length int, err error) {
if err := binary.Read(r, binary.BigEndian, &decodedLength); err != nil {
return 0, xerrors.Errorf("invalid rlp data: cannot parse string length: %w", err)
}
- if lenInBytes+int(decodedLength) > len(data) {
+ if decodedLength < 0 {
+ return 0, xerrors.Errorf("invalid rlp data: negative string length")
+ }
+
+ totalLength := lenInBytes + int(decodedLength)
+ if totalLength < 0 || totalLength > len(data) {
return 0, xerrors.Errorf("invalid rlp data: out of bound while parsing list")
}
return int(decodedLength), nil
diff --git a/chain/types/ethtypes/rlp_test.go b/chain/types/ethtypes/rlp_test.go
index bdbedff00..0ce6e15d9 100644
--- a/chain/types/ethtypes/rlp_test.go
+++ b/chain/types/ethtypes/rlp_test.go
@@ -143,6 +143,20 @@ func TestDecodeList(t *testing.T) {
}
}
+func TestDecodeNegativeLength(t *testing.T) {
+ testcases := [][]byte{
+ mustDecodeHex("0xbfffffffffffffff0041424344"),
+ mustDecodeHex("0xc1bFFF1111111111111111"),
+ mustDecodeHex("0xbFFF11111111111111"),
+ mustDecodeHex("0xbf7fffffffffffffff41424344"),
+ }
+
+ for _, tc := range testcases {
+ _, err := DecodeRLP(tc)
+ require.ErrorContains(t, err, "invalid rlp data")
+ }
+}
+
func TestDecodeEncodeTx(t *testing.T) {
testcases := [][]byte{
mustDecodeHex("0xdc82013a0185012a05f2008504a817c8008080872386f26fc1000000c0"),
diff --git a/chain/types/event.go b/chain/types/event.go
index 91b0e95d3..106a120e2 100644
--- a/chain/types/event.go
+++ b/chain/types/event.go
@@ -1,11 +1,6 @@
package types
import (
- "bytes"
- "fmt"
-
- cbg "github.com/whyrusleeping/cbor-gen"
-
"github.com/filecoin-project/go-state-types/abi"
)
@@ -38,24 +33,3 @@ type EventEntry struct {
}
type FilterID [32]byte // compatible with EthHash
-
-// DecodeEvents decodes a CBOR list of CBOR-encoded events.
-func DecodeEvents(input []byte) ([]Event, error) {
- r := bytes.NewReader(input)
- typ, len, err := cbg.NewCborReader(r).ReadHeader()
- if err != nil {
- return nil, fmt.Errorf("failed to read events: %w", err)
- }
- if typ != cbg.MajArray {
- return nil, fmt.Errorf("expected a CBOR list, was major type %d", typ)
- }
- events := make([]Event, 0, len)
- for i := 0; i < int(len); i++ {
- var evt Event
- if err := evt.UnmarshalCBOR(r); err != nil {
- return nil, fmt.Errorf("failed to parse event: %w", err)
- }
- events = append(events, evt)
- }
- return events, nil
-}
diff --git a/chain/types/execresult.go b/chain/types/execresult.go
index 2a25d22e2..4556f7b88 100644
--- a/chain/types/execresult.go
+++ b/chain/types/execresult.go
@@ -4,6 +4,8 @@ import (
"encoding/json"
"time"
+ "github.com/ipfs/go-cid"
+
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/exitcode"
@@ -24,6 +26,9 @@ type MessageTrace struct {
Method abi.MethodNum
Params []byte
ParamsCodec uint64
+ GasLimit uint64
+ ReadOnly bool
+ CodeCid cid.Cid
}
type ReturnTrace struct {
diff --git a/chain/types/vmcontext.go b/chain/types/vmcontext.go
index 83ad81315..bab9c213f 100644
--- a/chain/types/vmcontext.go
+++ b/chain/types/vmcontext.go
@@ -27,24 +27,3 @@ type StateTree interface {
Version() StateTreeVersion
}
-
-type storageWrapper struct {
- s Storage
-}
-
-func (sw *storageWrapper) Put(i cbg.CBORMarshaler) (cid.Cid, error) {
- c, err := sw.s.Put(i)
- if err != nil {
- return cid.Undef, err
- }
-
- return c, nil
-}
-
-func (sw *storageWrapper) Get(c cid.Cid, out cbg.CBORUnmarshaler) error {
- if err := sw.s.Get(c, out); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/chain/vm/fvm.go b/chain/vm/fvm.go
index 7c79972c7..bc4c3a851 100644
--- a/chain/vm/fvm.go
+++ b/chain/vm/fvm.go
@@ -33,6 +33,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/aerrors"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/policy"
+ "github.com/filecoin-project/lotus/chain/rand"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/sigs"
@@ -43,7 +44,7 @@ var _ Interface = (*FVM)(nil)
var _ ffi_cgo.Externs = (*FvmExtern)(nil)
type FvmExtern struct {
- Rand
+ rand.Rand
blockstore.Blockstore
epoch abi.ChainEpoch
lbState LookbackStateGetter
@@ -458,7 +459,7 @@ func (vm *FVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet
}
if vm.returnEvents && len(ret.EventsBytes) > 0 {
- applyRet.Events, err = types.DecodeEvents(ret.EventsBytes)
+ applyRet.Events, err = decodeEvents(ret.EventsBytes)
if err != nil {
return nil, fmt.Errorf("failed to decode events returned by the FVM: %w", err)
}
@@ -514,16 +515,12 @@ func (vm *FVM) ApplyImplicitMessage(ctx context.Context, cmsg *types.Message) (*
}
if vm.returnEvents && len(ret.EventsBytes) > 0 {
- applyRet.Events, err = types.DecodeEvents(ret.EventsBytes)
+ applyRet.Events, err = decodeEvents(ret.EventsBytes)
if err != nil {
return nil, fmt.Errorf("failed to decode events returned by the FVM: %w", err)
}
}
- if ret.ExitCode != 0 {
- return applyRet, fmt.Errorf("implicit message failed with exit code: %d and error: %w", ret.ExitCode, applyRet.ActorErr)
- }
-
return applyRet, nil
}
diff --git a/chain/vm/fvm_util.go b/chain/vm/fvm_util.go
new file mode 100644
index 000000000..9f3dfd869
--- /dev/null
+++ b/chain/vm/fvm_util.go
@@ -0,0 +1,39 @@
+package vm
+
+import (
+ "bytes"
+ "fmt"
+
+ cbg "github.com/whyrusleeping/cbor-gen"
+
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+const MaxEventSliceLength = 6_000_000
+
+// decodeEvents decodes a CBOR list of CBOR-encoded events.
+func decodeEvents(input []byte) ([]types.Event, error) {
+ r := bytes.NewReader(input)
+ typ, length, err := cbg.NewCborReader(r).ReadHeader()
+ if err != nil {
+ return nil, fmt.Errorf("failed to read events: %w", err)
+ }
+
+ if length > MaxEventSliceLength {
+ log.Errorf("extremely long event slice (len %d) returned, not decoding", length)
+ return nil, nil
+ }
+
+ if typ != cbg.MajArray {
+ return nil, fmt.Errorf("expected a CBOR list, was major type %d", typ)
+ }
+ events := make([]types.Event, 0, length)
+ for i := 0; i < int(length); i++ {
+ var evt types.Event
+ if err := evt.UnmarshalCBOR(r); err != nil {
+ return nil, fmt.Errorf("failed to parse event: %w", err)
+ }
+ events = append(events, evt)
+ }
+ return events, nil
+}
diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go
index a5b108238..355fcea2b 100644
--- a/chain/vm/runtime.go
+++ b/chain/vm/runtime.go
@@ -33,6 +33,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
"github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/rand"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/types"
)
@@ -229,21 +230,35 @@ func (rt *Runtime) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool)
}
func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness {
- res, err := rt.vm.rand.GetChainRandomness(rt.ctx, personalization, randEpoch, entropy)
+ digest, err := rt.vm.rand.GetChainRandomness(rt.ctx, randEpoch)
if err != nil {
panic(aerrors.Fatalf("could not get ticket randomness: %s", err))
}
- return res
+
+ ret, err := rand.DrawRandomnessFromDigest(digest, personalization, randEpoch, entropy)
+
+ if err != nil {
+ panic(aerrors.Fatalf("could not draw ticket randomness: %s", err))
+ }
+
+ return ret
}
func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness {
- res, err := rt.vm.rand.GetBeaconRandomness(rt.ctx, personalization, randEpoch, entropy)
+ digest, err := rt.vm.rand.GetBeaconRandomness(rt.ctx, randEpoch)
if err != nil {
- panic(aerrors.Fatalf("could not get beacon randomness: %s", err))
+ panic(aerrors.Fatalf("could not get beacon randomness: %s", err))
}
- return res
+
+ ret, err := rand.DrawRandomnessFromDigest(digest, personalization, randEpoch, entropy)
+
+ if err != nil {
+ panic(aerrors.Fatalf("could not draw beacon randomness: %s", err))
+ }
+
+ return ret
}
func (rt *Runtime) NewActorAddress() address.Address {
diff --git a/chain/vm/syscalls.go b/chain/vm/syscalls.go
index 68dbbb2df..83a07ca2d 100644
--- a/chain/vm/syscalls.go
+++ b/chain/vm/syscalls.go
@@ -70,11 +70,6 @@ type syscallShim struct {
}
func (ss *syscallShim) ComputeUnsealedSectorCID(st abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
- var sum abi.PaddedPieceSize
- for _, p := range pieces {
- sum += p.Size
- }
-
commd, err := ffiwrapper.GenerateUnsealedCID(st, pieces)
if err != nil {
log.Errorf("generate data commitment failed: %s", err)
diff --git a/chain/vm/vm.go b/chain/vm/vm.go
index 58afc14bc..ba404ab1f 100644
--- a/chain/vm/vm.go
+++ b/chain/vm/vm.go
@@ -11,7 +11,7 @@ import (
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2"
- mh "github.com/multiformats/go-multihash"
+ "github.com/multiformats/go-multicodec"
cbg "github.com/whyrusleeping/cbor-gen"
"go.opencensus.io/stats"
"go.opencensus.io/trace"
@@ -21,7 +21,6 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
builtin_types "github.com/filecoin-project/go-state-types/builtin"
- "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-state-types/network"
@@ -32,13 +31,13 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/account"
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
+ "github.com/filecoin-project/lotus/chain/rand"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/metrics"
)
const MaxCallDepth = 4096
-const CborCodec = 0x51
var (
log = logging.Logger("vm")
@@ -128,7 +127,7 @@ func (bs *gasChargingBlocks) Put(ctx context.Context, blk block.Block) error {
func (vm *LegacyVM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runtime) *Runtime {
paramsCodec := uint64(0)
if len(msg.Params) > 0 {
- paramsCodec = CborCodec
+ paramsCodec = uint64(multicodec.Cbor)
}
rt := &Runtime{
ctx: ctx,
@@ -224,7 +223,7 @@ type LegacyVM struct {
buf *blockstore.BufferedBlockstore
blockHeight abi.ChainEpoch
areg *ActorRegistry
- rand Rand
+ rand rand.Rand
circSupplyCalc CircSupplyCalculator
networkVersion network.Version
baseFee abi.TokenAmount
@@ -238,7 +237,7 @@ type VMOpts struct {
StateBase cid.Cid
Epoch abi.ChainEpoch
Timestamp uint64
- Rand Rand
+ Rand rand.Rand
Bstore blockstore.Blockstore
Actors *ActorRegistry
Syscalls SyscallBuilder
@@ -287,11 +286,6 @@ func NewLegacyVM(ctx context.Context, opts *VMOpts) (*LegacyVM, error) {
}, nil
}
-type Rand interface {
- GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
- GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error)
-}
-
type ApplyRet struct {
types.MessageReceipt
ActorErr aerrors.ActorError
@@ -385,7 +379,7 @@ func (vm *LegacyVM) send(ctx context.Context, msg *types.Message, parent *Runtim
retCodec := uint64(0)
if len(ret) > 0 {
- retCodec = CborCodec
+ retCodec = uint64(multicodec.Cbor)
}
rt.executionTrace.MsgRct = types.ReturnTrace{
ExitCode: aerrors.RetCode(err),
@@ -700,15 +694,15 @@ func (vm *LegacyVM) ActorStore(ctx context.Context) adt.Store {
}
func linksForObj(blk block.Block, cb func(cid.Cid)) error {
- switch blk.Cid().Prefix().Codec {
- case cid.DagCBOR:
+ switch multicodec.Code(blk.Cid().Prefix().Codec) {
+ case multicodec.DagCbor:
err := cbg.ScanForLinks(bytes.NewReader(blk.RawData()), cb)
if err != nil {
return xerrors.Errorf("cbg.ScanForLinks: %w", err)
}
return nil
- case cid.Raw:
- // We implicitly have all children of raw blocks.
+ case multicodec.Raw, multicodec.Cbor:
+ // We implicitly have all children of raw/cbor blocks.
return nil
default:
return xerrors.Errorf("vm flush copy method only supports dag cbor")
@@ -808,14 +802,17 @@ func copyRec(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid,
}
prefix := link.Prefix()
- if prefix.Codec == cid.FilCommitmentSealed || prefix.Codec == cid.FilCommitmentUnsealed {
+ codec := multicodec.Code(prefix.Codec)
+ switch codec {
+ case multicodec.FilCommitmentSealed, multicodec.FilCommitmentUnsealed:
return
}
// We always have blocks inlined into CIDs, but we may not have their children.
- if prefix.MhType == mh.IDENTITY {
+ if multicodec.Code(prefix.MhType) == multicodec.Identity {
// Unless the inlined block has no children.
- if prefix.Codec == cid.Raw {
+ switch codec {
+ case multicodec.Raw, multicodec.Cbor:
return
}
} else {
diff --git a/cli/sync.go b/cli/sync.go
index 02e4e381f..89d2d94f0 100644
--- a/cli/sync.go
+++ b/cli/sync.go
@@ -273,11 +273,6 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error {
continue
}
- head, err := napi.ChainHead(ctx)
- if err != nil {
- return err
- }
-
working := -1
for i, ss := range state.ActiveSyncs {
switch ss.Stage {
@@ -332,7 +327,11 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error {
_ = target // todo: maybe print? (creates a bunch of line wrapping issues with most tipsets)
- if !watch && time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs) {
+ isDone, err := IsSyncDone(ctx, napi)
+ if err != nil {
+ return err
+ }
+ if !watch && isDone {
fmt.Println("\nDone!")
return nil
}
@@ -347,3 +346,11 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error {
i++
}
}
+
+func IsSyncDone(ctx context.Context, napi v0api.FullNode) (bool, error) {
+ head, err := napi.ChainHead(ctx)
+ if err != nil {
+ return false, err
+ }
+ return time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs), nil
+}
diff --git a/cli/wallet.go b/cli/wallet.go
index 2afe8617b..628d6841d 100644
--- a/cli/wallet.go
+++ b/cli/wallet.go
@@ -7,7 +7,9 @@ import (
"encoding/json"
"fmt"
"os"
+ "os/signal"
"strings"
+ "syscall"
"github.com/urfave/cli/v2"
"golang.org/x/term"
@@ -206,7 +208,12 @@ var walletBalance = &cli.Command{
return err
}
- if balance.Equals(types.NewInt(0)) {
+ inSync, err := IsSyncDone(ctx, api)
+ if err != nil {
+ return err
+ }
+
+ if balance.Equals(types.NewInt(0)) && !inSync {
afmt.Printf("%s (warning: may display 0 if chain sync in progress)\n", types.FIL(balance))
} else {
afmt.Printf("%s\n", types.FIL(balance))
@@ -330,6 +337,17 @@ var walletImport = &cli.Command{
if !cctx.Args().Present() || cctx.Args().First() == "-" {
if term.IsTerminal(int(os.Stdin.Fd())) {
fmt.Print("Enter private key(not display in the terminal): ")
+
+ sigCh := make(chan os.Signal, 1)
+ // Notify the channel when SIGINT is received
+ signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
+
+ go func() {
+ <-sigCh
+ fmt.Println("\nInterrupt signal received. Exiting...")
+ os.Exit(1)
+ }()
+
inpdata, err = term.ReadPassword(int(os.Stdin.Fd()))
if err != nil {
return err
diff --git a/cli/wallet_test.go b/cli/wallet_test.go
index dee26018b..eb2c544f0 100644
--- a/cli/wallet_test.go
+++ b/cli/wallet_test.go
@@ -21,6 +21,7 @@ import (
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/types/mock"
)
func TestWalletNew(t *testing.T) {
@@ -133,6 +134,11 @@ func TestWalletBalance(t *testing.T) {
balance := big.NewInt(1234)
+ // add blocks to the chain
+ first := mock.TipSet(mock.MkBlock(nil, 5, 4))
+ head := mock.TipSet(mock.MkBlock(first, 15, 7))
+
+ mockApi.EXPECT().ChainHead(ctx).Return(head, nil)
mockApi.EXPECT().WalletBalance(ctx, addr).Return(balance, nil)
//stm: @CLI_WALLET_BALANCE_001
diff --git a/cmd/lotus-bench/amt_internal.go b/cmd/lotus-bench/amt_internal.go
new file mode 100644
index 000000000..f0e3035b7
--- /dev/null
+++ b/cmd/lotus-bench/amt_internal.go
@@ -0,0 +1,312 @@
+// Copied from go-amt-ipld https://github.com/filecoin-project/go-amt-ipld/tree/master/internal
+// which for some reason is a go internal package and therefore cannot be imported
+
+package main
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "sort"
+
+ cid "github.com/ipfs/go-cid"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ xerrors "golang.org/x/xerrors"
+)
+
+type AMTRoot struct {
+ BitWidth uint64
+ Height uint64
+ Count uint64
+ AMTNode AMTNode
+}
+
+type AMTNode struct {
+ Bmap []byte
+ Links []cid.Cid
+ Values []*cbg.Deferred
+}
+
+// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
+
+var _ = xerrors.Errorf
+var _ = cid.Undef
+var _ = math.E
+var _ = sort.Sort
+
+var lengthBufAMTRoot = []byte{132}
+
+func (t *AMTRoot) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+
+ cw := cbg.NewCborWriter(w)
+
+ if _, err := cw.Write(lengthBufAMTRoot); err != nil {
+ return err
+ }
+
+ // t.BitWidth (uint64) (uint64)
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.BitWidth); err != nil {
+ return err
+ }
+
+ // t.Height (uint64) (uint64)
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.Height); err != nil {
+ return err
+ }
+
+ // t.Count (uint64) (uint64)
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.Count); err != nil {
+ return err
+ }
+
+ // t.AMTNode (internal.AMTNode) (struct)
+ if err := t.AMTNode.MarshalCBOR(cw); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (t *AMTRoot) UnmarshalCBOR(r io.Reader) (err error) {
+ *t = AMTRoot{}
+
+ cr := cbg.NewCborReader(r)
+
+ maj, extra, err := cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ }()
+
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 4 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ // t.BitWidth (uint64) (uint64)
+
+ {
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajUnsignedInt {
+ return fmt.Errorf("wrong type for uint64 field")
+ }
+ t.BitWidth = extra
+
+ }
+ // t.Height (uint64) (uint64)
+
+ {
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajUnsignedInt {
+ return fmt.Errorf("wrong type for uint64 field")
+ }
+ t.Height = extra
+
+ }
+ // t.Count (uint64) (uint64)
+
+ {
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajUnsignedInt {
+ return fmt.Errorf("wrong type for uint64 field")
+ }
+ t.Count = extra
+
+ }
+ // t.AMTNode (internal.AMTNode) (struct)
+
+ {
+
+ if err := t.AMTNode.UnmarshalCBOR(cr); err != nil {
+ return xerrors.Errorf("unmarshaling t.AMTNode: %w", err)
+ }
+
+ }
+ return nil
+}
+
+var lengthBufAMTNode = []byte{131}
+
+func (t *AMTNode) MarshalCBOR(w io.Writer) error {
+ if t == nil {
+ _, err := w.Write(cbg.CborNull)
+ return err
+ }
+
+ cw := cbg.NewCborWriter(w)
+
+ if _, err := cw.Write(lengthBufAMTNode); err != nil {
+ return err
+ }
+
+ // t.Bmap ([]uint8) (slice)
+ if len(t.Bmap) > cbg.ByteArrayMaxLen {
+ return xerrors.Errorf("Byte array in field t.Bmap was too long")
+ }
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Bmap))); err != nil {
+ return err
+ }
+
+ if _, err := cw.Write(t.Bmap[:]); err != nil {
+ return err
+ }
+
+ // t.Links ([]cid.Cid) (slice)
+ if len(t.Links) > cbg.MaxLength {
+ return xerrors.Errorf("Slice value in field t.Links was too long")
+ }
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Links))); err != nil {
+ return err
+ }
+ for _, v := range t.Links {
+ if err := cbg.WriteCid(w, v); err != nil {
+ return xerrors.Errorf("failed writing cid field t.Links: %w", err)
+ }
+ }
+
+ // t.Values ([]*typegen.Deferred) (slice)
+ if len(t.Values) > cbg.MaxLength {
+ return xerrors.Errorf("Slice value in field t.Values was too long")
+ }
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Values))); err != nil {
+ return err
+ }
+ for _, v := range t.Values {
+ if err := v.MarshalCBOR(cw); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *AMTNode) UnmarshalCBOR(r io.Reader) (err error) {
+ *t = AMTNode{}
+
+ cr := cbg.NewCborReader(r)
+
+ maj, extra, err := cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ }()
+
+ if maj != cbg.MajArray {
+ return fmt.Errorf("cbor input should be of type array")
+ }
+
+ if extra != 3 {
+ return fmt.Errorf("cbor input had wrong number of fields")
+ }
+
+ // t.Bmap ([]uint8) (slice)
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+
+ if extra > cbg.ByteArrayMaxLen {
+ return fmt.Errorf("t.Bmap: byte array too large (%d)", extra)
+ }
+ if maj != cbg.MajByteString {
+ return fmt.Errorf("expected byte array")
+ }
+
+ if extra > 0 {
+ t.Bmap = make([]uint8, extra)
+ }
+
+ if _, err := io.ReadFull(cr, t.Bmap[:]); err != nil {
+ return err
+ }
+ // t.Links ([]cid.Cid) (slice)
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+
+ if extra > cbg.MaxLength {
+ return fmt.Errorf("t.Links: array too large (%d)", extra)
+ }
+
+ if maj != cbg.MajArray {
+ return fmt.Errorf("expected cbor array")
+ }
+
+ if extra > 0 {
+ t.Links = make([]cid.Cid, extra)
+ }
+
+ for i := 0; i < int(extra); i++ {
+
+ c, err := cbg.ReadCid(cr)
+ if err != nil {
+ return xerrors.Errorf("reading cid field t.Links failed: %w", err)
+ }
+ t.Links[i] = c
+ }
+
+ // t.Values ([]*typegen.Deferred) (slice)
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+
+ if extra > cbg.MaxLength {
+ return fmt.Errorf("t.Values: array too large (%d)", extra)
+ }
+
+ if maj != cbg.MajArray {
+ return fmt.Errorf("expected cbor array")
+ }
+
+ if extra > 0 {
+ t.Values = make([]*cbg.Deferred, extra)
+ }
+
+ for i := 0; i < int(extra); i++ {
+
+ var v cbg.Deferred
+ if err := v.UnmarshalCBOR(cr); err != nil {
+ return err
+ }
+
+ t.Values[i] = &v
+ }
+
+ return nil
+}
diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go
index 9f43d9538..95b91054a 100644
--- a/cmd/lotus-bench/import.go
+++ b/cmd/lotus-bench/import.go
@@ -497,21 +497,6 @@ type Invocation struct {
const GasPerNs = 10
-func countGasCosts(et *types.ExecutionTrace) int64 {
- var cgas int64
-
- for _, gc := range et.GasCharges {
- cgas += gc.ComputeGas
- }
-
- for _, sub := range et.Subcalls {
- c := countGasCosts(&sub) //nolint
- cgas += c
- }
-
- return cgas
-}
-
type stats struct {
timeTaken meanVar
gasRatio meanVar
diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go
index 6e7e274f2..fc484c4e3 100644
--- a/cmd/lotus-bench/main.go
+++ b/cmd/lotus-bench/main.go
@@ -1,6 +1,7 @@
package main
import (
+ "bytes"
"context"
"encoding/json"
"fmt"
@@ -8,9 +9,16 @@ import (
"math/rand"
"os"
"path/filepath"
+ "sync"
"time"
"github.com/docker/go-units"
+ "github.com/ipfs/boxo/blockservice"
+ "github.com/ipfs/boxo/ipld/merkledag"
+ "github.com/ipfs/go-cid"
+ offline "github.com/ipfs/go-ipfs-exchange-offline"
+ cbor "github.com/ipfs/go-ipld-cbor"
+ format "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
"github.com/minio/blake2b-simd"
"github.com/mitchellh/go-homedir"
@@ -20,10 +28,14 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-paramfetch"
"github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
prooftypes "github.com/filecoin-project/go-state-types/proof"
+ adt "github.com/filecoin-project/specs-actors/v6/actors/util/adt"
lapi "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
@@ -104,6 +116,7 @@ func main() {
DisableSliceFlagSeparator: true,
Commands: []*cli.Command{
proveCmd,
+ amtBenchCmd,
sealBenchCmd,
simpleCmd,
importBenchCmd,
@@ -117,6 +130,211 @@ func main() {
}
}
+type amtStatCollector struct {
+ ds format.NodeGetter
+ walk func(format.Node) ([]*format.Link, error)
+
+ statsLk sync.Mutex
+ totalAMTLinks int
+ totalAMTValues int
+ totalAMTLinkNodes int
+ totalAMTValueNodes int
+ totalAMTLinkNodeSize int
+ totalAMTValueNodeSize int
+}
+
+func (asc *amtStatCollector) String() string {
+ asc.statsLk.Lock()
+ defer asc.statsLk.Unlock()
+
+ str := "\n------------\n"
+ str += fmt.Sprintf("Link Count: %d\n", asc.totalAMTLinks)
+ str += fmt.Sprintf("Value Count: %d\n", asc.totalAMTValues)
+ str += fmt.Sprintf("%d link nodes %d bytes\n", asc.totalAMTLinkNodes, asc.totalAMTLinkNodeSize)
+ str += fmt.Sprintf("%d value nodes %d bytes\n", asc.totalAMTValueNodes, asc.totalAMTValueNodeSize)
+ str += fmt.Sprintf("Total bytes: %d\n------------\n", asc.totalAMTLinkNodeSize+asc.totalAMTValueNodeSize)
+ return str
+}
+
+func (asc *amtStatCollector) record(ctx context.Context, nd format.Node) error {
+ size, err := nd.Size()
+ if err != nil {
+ return err
+ }
+
+ var node AMTNode
+ if err := node.UnmarshalCBOR(bytes.NewReader(nd.RawData())); err != nil {
+ // try to deserialize root
+ var root AMTRoot
+ if err := root.UnmarshalCBOR(bytes.NewReader(nd.RawData())); err != nil {
+ return err
+ }
+ node = root.AMTNode
+ }
+
+ asc.statsLk.Lock()
+ defer asc.statsLk.Unlock()
+
+ link := len(node.Links) > 0
+ value := len(node.Values) > 0
+
+ if link {
+ asc.totalAMTLinks += len(node.Links)
+ asc.totalAMTLinkNodes++
+ asc.totalAMTLinkNodeSize += int(size)
+ } else if value {
+ asc.totalAMTValues += len(node.Values)
+ asc.totalAMTValueNodes++
+ asc.totalAMTValueNodeSize += int(size)
+ } else {
+ return xerrors.Errorf("unexpected AMT node %x: neither link nor value", nd.RawData())
+ }
+
+ return nil
+}
+
+func (asc *amtStatCollector) walkLinks(ctx context.Context, c cid.Cid) ([]*format.Link, error) {
+ nd, err := asc.ds.Get(ctx, c)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := asc.record(ctx, nd); err != nil {
+ return nil, err
+ }
+
+ return asc.walk(nd)
+}
+
+func carWalkFunc(nd format.Node) (out []*format.Link, err error) {
+ for _, link := range nd.Links() {
+ if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed {
+ continue
+ }
+ out = append(out, link)
+ }
+ return out, nil
+}
+
+var amtBenchCmd = &cli.Command{
+ Name: "amt",
+ Usage: "Benchmark AMT churn",
+ Flags: []cli.Flag{
+ &cli.IntFlag{
+ Name: "rounds",
+ Usage: "rounds of churn to measure",
+ Value: 1,
+ },
+ &cli.IntFlag{
+ Name: "interval",
+ Usage: "AMT idx interval for churning values",
+ Value: 2880,
+ },
+ &cli.IntFlag{
+ Name: "bitwidth",
+ Usage: "AMT bitwidth",
+ Value: 6,
+ },
+ },
+ Action: func(c *cli.Context) error {
+ bs := blockstore.NewMemory()
+ ctx := c.Context
+ store := adt.WrapStore(ctx, cbor.NewCborStore(bs))
+
+ // Setup in memory blockstore
+ bitwidth := c.Int("bitwidth")
+ array, err := adt.MakeEmptyArray(store, bitwidth)
+ if err != nil {
+ return err
+ }
+
+ // Using motivating empirical example: market actor states AMT
+ // Create 40,000,000 states for realistic workload
+ fmt.Printf("Populating AMT\n")
+ for i := 0; i < 40000000; i++ {
+ if err := array.Set(uint64(i), &market.DealState{
+ SectorStartEpoch: abi.ChainEpoch(2000000 + i),
+ LastUpdatedEpoch: abi.ChainEpoch(-1),
+ SlashEpoch: -1,
+ VerifiedClaim: verifreg.AllocationId(i),
+ }); err != nil {
+ return err
+ }
+ }
+
+ r, err := array.Root()
+ if err != nil {
+ return err
+ }
+
+ // Measure ratio of internal / leaf nodes / sizes
+ dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
+ asc := &amtStatCollector{
+ ds: dag,
+ walk: carWalkFunc,
+ }
+
+ fmt.Printf("Measuring AMT\n")
+ seen := cid.NewSet()
+ if err := merkledag.Walk(ctx, asc.walkLinks, r, seen.Visit, merkledag.Concurrent()); err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", asc)
+
+ // Overwrite ids with idx % interval: one epoch of market cron
+ rounds := c.Int("rounds")
+ interval := c.Int("interval")
+
+ fmt.Printf("Overwrite 1 out of %d values for %d rounds\n", interval, rounds)
+ array, err = adt.AsArray(store, r, bitwidth)
+ if err != nil {
+ return err
+ }
+ roots := make([]cid.Cid, rounds)
+ for j := 0; j < rounds; j++ {
+ if j%10 == 0 {
+ fmt.Printf("round: %d\n", j)
+ }
+ for i := j; i < 40000000; i += interval {
+ if i%interval == j {
+ if err := array.Set(uint64(i), &market.DealState{
+ SectorStartEpoch: abi.ChainEpoch(2000000 + i),
+ LastUpdatedEpoch: abi.ChainEpoch(1),
+ SlashEpoch: -1,
+ VerifiedClaim: verifreg.AllocationId(i),
+ }); err != nil {
+ return err
+ }
+ }
+ }
+ roots[j], err = array.Root()
+ if err != nil {
+ return err
+ }
+
+ }
+
+ // Measure churn
+ dag = merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
+ asc = &amtStatCollector{
+ ds: dag,
+ walk: carWalkFunc,
+ }
+
+ fmt.Printf("Measuring %d rounds of churn\n", rounds)
+
+ for _, r := range roots {
+ if err := merkledag.Walk(ctx, asc.walkLinks, r, seen.Visit, merkledag.Concurrent()); err != nil {
+ return err
+ }
+ }
+
+ fmt.Printf("%s\n", asc)
+ return nil
+ },
+}
+
var sealBenchCmd = &cli.Command{
Name: "sealing",
Usage: "Benchmark seal and winning post and window post",
diff --git a/cmd/lotus-bench/simple.go b/cmd/lotus-bench/simple.go
index a742b0fb3..6e1dea0db 100644
--- a/cmd/lotus-bench/simple.go
+++ b/cmd/lotus-bench/simple.go
@@ -120,6 +120,11 @@ p: pvC0JBrEyUqtIIUvB2UUx/2a24c3Cvnu6AZ0D3IMBYAu...
type benchSectorProvider map[storiface.SectorFileType]string
+func (b benchSectorProvider) AcquireSectorCopy(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
+ // there's no copying in this context
+ return b.AcquireSector(ctx, id, existing, allocate, ptype)
+}
+
func (b benchSectorProvider) AcquireSector(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
out := storiface.SectorPaths{
ID: id.ID,
diff --git a/cmd/lotus-shed/block.go b/cmd/lotus-shed/block.go
new file mode 100644
index 000000000..814eef3fd
--- /dev/null
+++ b/cmd/lotus-shed/block.go
@@ -0,0 +1,42 @@
+package main
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+
+ "github.com/urfave/cli/v2"
+
+ "github.com/filecoin-project/lotus/chain/types"
+ lcli "github.com/filecoin-project/lotus/cli"
+)
+
+var blockCmd = &cli.Command{
+ Name: "block",
+ Usage: "Output decoded block header in readable form",
+ ArgsUsage: "[block header hex]",
+ Action: func(cctx *cli.Context) error {
+ if cctx.NArg() != 1 {
+ return lcli.IncorrectNumArgs(cctx)
+ }
+
+ b, err := hex.DecodeString(cctx.Args().First())
+ if err != nil {
+ return err
+ }
+
+ var blk types.BlockHeader
+ if err := blk.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
+ return err
+ }
+
+ jb, err := json.MarshalIndent(blk, "", " ")
+ if err != nil {
+ return err
+ }
+
+ fmt.Println(string(jb))
+ return nil
+ },
+}
diff --git a/cmd/lotus-shed/cron-count.go b/cmd/lotus-shed/cron-count.go
index 2b8dc8ebf..9741792ec 100644
--- a/cmd/lotus-shed/cron-count.go
+++ b/cmd/lotus-shed/cron-count.go
@@ -1,14 +1,24 @@
package main
import (
+ "encoding/json"
"fmt"
+ "os"
+ "github.com/ipfs/go-cid"
+ ipldcbor "github.com/ipfs/go-ipld-cbor"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-bitfield"
+ "github.com/filecoin-project/go-state-types/abi"
+ miner11 "github.com/filecoin-project/go-state-types/builtin/v11/miner"
+ "github.com/filecoin-project/go-state-types/builtin/v11/util/adt"
+ "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/types"
lcli "github.com/filecoin-project/lotus/cli"
)
@@ -17,6 +27,245 @@ var cronWcCmd = &cli.Command{
Description: "cron stats",
Subcommands: []*cli.Command{
minerDeadlineCronCountCmd,
+ minerDeadlinePartitionMeasurementCmd,
+ },
+}
+
+type DeadlineRef struct {
+ To string
+ Height abi.ChainEpoch
+ Gas json.RawMessage
+}
+
+type DeadlineSummary struct {
+ Partitions []PartitionSummary
+ PreCommitExpiry PreCommitExpiry
+ VestingDiff VestingDiff
+}
+
+type PreCommitExpiry struct {
+ Expired []uint64
+}
+
+type VestingDiff struct {
+ PrevTableSize int
+ NewTableSize int
+}
+
+type PartitionSummary struct {
+ Live int
+ Dead int
+ Faulty int
+ Diff PartitionDiff
+}
+
+type PartitionDiff struct {
+ Faulted int
+ Recovered int
+ Killed int
+}
+
+var minerDeadlinePartitionMeasurementCmd = &cli.Command{
+ Name: "deadline-summary",
+ Description: "print a summary of deadline partitions, pre-commit expiry and vesting-table diffs for each input deadline ref",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "json",
+ Usage: "read input as json",
+ Value: true,
+ },
+ &cli.StringFlag{
+ Name: "tipset",
+ Usage: "specify tipset state to search on (pass comma separated array of cids)",
+ },
+ },
+ Action: func(c *cli.Context) error {
+ // read in values to process
+ if !c.Bool("json") {
+ return xerrors.Errorf("unsupported non json input format")
+ }
+ var refStream []DeadlineRef
+ if err := json.NewDecoder(os.Stdin).Decode(&refStream); err != nil {
+ return xerrors.Errorf("failed to parse input: %w", err)
+ }
+
+ // go from height and sp addr to deadline partition data
+ n, acloser, err := lcli.GetFullNodeAPI(c)
+ if err != nil {
+ return err
+ }
+ defer acloser()
+ ctx := lcli.ReqContext(c)
+
+ bs := ReadOnlyAPIBlockstore{n}
+ adtStore := adt.WrapStore(ctx, ipldcbor.NewCborStore(&bs))
+
+ dSummaries := make([]DeadlineSummary, len(refStream))
+ for j, ref := range refStream {
+ // get miner's deadline
+ tsBefore, err := n.ChainGetTipSetByHeight(ctx, ref.Height, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("failed to get tipset at epoch %d: %w", ref.Height, err)
+ }
+ tsAfter, err := n.ChainGetTipSetByHeight(ctx, ref.Height+1, types.EmptyTSK)
+ if err != nil {
+ return xerrors.Errorf("failed to get tipset at epoch %d: %w", ref.Height, err)
+ }
+ addr, err := address.NewFromString(ref.To)
+ if err != nil {
+ return xerrors.Errorf("failed to get address from input string: %w", err)
+ }
+ dline, err := n.StateMinerProvingDeadline(ctx, addr, tsBefore.Key())
+ if err != nil {
+ return xerrors.Errorf("failed to read proving deadline: %w", err)
+ }
+
+ // iterate through all partitions at epoch of processing
+ var pSummaries []PartitionSummary
+ psBefore, err := n.StateMinerPartitions(ctx, addr, dline.Index, tsBefore.Key())
+ if err != nil {
+ return xerrors.Errorf("failed to get partitions: %w", err)
+ }
+ psAfter, err := n.StateMinerPartitions(ctx, addr, dline.Index, tsAfter.Key())
+ if err != nil {
+ return xerrors.Errorf("failed to get partitions: %w", err)
+ }
+ if len(psBefore) != len(psAfter) {
+ return xerrors.Errorf("partition count mismatch before and after epoch: %d != %d", len(psBefore), len(psAfter))
+ }
+
+ type partitionCount struct {
+ live int
+ dead int
+ faulty int
+ recovering int
+ }
+ countPartition := func(p api.Partition) (partitionCount, error) {
+ liveSectors, err := p.LiveSectors.All(abi.MaxSectorNumber)
+ if err != nil {
+ return partitionCount{}, xerrors.Errorf("failed to count live sectors in partition: %w", err)
+ }
+ allSectors, err := p.AllSectors.All(abi.MaxSectorNumber)
+ if err != nil {
+ return partitionCount{}, xerrors.Errorf("failed to count all sectors in partition: %w", err)
+ }
+ faultySectors, err := p.FaultySectors.All(abi.MaxSectorNumber)
+ if err != nil {
+ return partitionCount{}, xerrors.Errorf("failed to count faulty sectors in partition: %w", err)
+ }
+ recoveringSectors, err := p.RecoveringSectors.All(abi.MaxSectorNumber)
+ if err != nil {
+ return partitionCount{}, xerrors.Errorf("failed to count recovering sectors in partition: %w", err)
+ }
+
+ return partitionCount{
+ live: len(liveSectors),
+ dead: len(allSectors) - len(liveSectors),
+ faulty: len(faultySectors),
+ recovering: len(recoveringSectors),
+ }, nil
+ }
+
+ countVestingTable := func(table cid.Cid) (int, error) {
+ var vestingTable miner11.VestingFunds
+ if err := adtStore.Get(ctx, table, &vestingTable); err != nil {
+ return 0, err
+ }
+ return len(vestingTable.Funds), nil
+ }
+
+ for i := 0; i < len(psBefore); i++ {
+ cntBefore, err := countPartition(psBefore[i])
+ if err != nil {
+ return err
+ }
+ cntAfter, err := countPartition(psAfter[i])
+ if err != nil {
+ return err
+ }
+ pSummaries = append(pSummaries, PartitionSummary{
+ Live: cntBefore.live,
+ Dead: cntBefore.dead,
+ Faulty: cntBefore.faulty,
+ Diff: PartitionDiff{
+ Faulted: cntAfter.faulty - cntBefore.faulty,
+ Recovered: cntBefore.recovering - cntAfter.recovering,
+ Killed: cntAfter.dead - cntBefore.dead,
+ },
+ })
+ }
+
+ // Precommit and vesting table data
+ // Before
+ aBefore, err := n.StateGetActor(ctx, addr, tsBefore.Key())
+ if err != nil {
+ return err
+ }
+ var st miner11.State
+ err = adtStore.Get(ctx, aBefore.Head, &st)
+ if err != nil {
+ return err
+ }
+ expiryQArray, err := adt.AsArray(adtStore, st.PreCommittedSectorsCleanUp, miner11.PrecommitCleanUpAmtBitwidth)
+ if err != nil {
+ return err
+ }
+ var sectorsBf bitfield.BitField
+ var accumulator []uint64
+ h := ref.Height
+ if err := expiryQArray.ForEach(&sectorsBf, func(i int64) error {
+ if abi.ChainEpoch(i) > h {
+ return nil
+ }
+ sns, err := sectorsBf.All(abi.MaxSectorNumber)
+ if err != nil {
+ return err
+ }
+ accumulator = append(accumulator, sns...)
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ vestingBefore, err := countVestingTable(st.VestingFunds)
+ if err != nil {
+ return err
+ }
+
+ // After
+ aAfter, err := n.StateGetActor(ctx, addr, tsAfter.Key())
+ if err != nil {
+ return err
+ }
+ var stAfter miner11.State
+ err = adtStore.Get(ctx, aAfter.Head, &stAfter)
+ if err != nil {
+ return err
+ }
+
+ vestingAfter, err := countVestingTable(stAfter.VestingFunds)
+ if err != nil {
+ return err
+ }
+
+ dSummaries[j] = DeadlineSummary{
+ Partitions: pSummaries,
+ PreCommitExpiry: PreCommitExpiry{
+ Expired: accumulator,
+ },
+ VestingDiff: VestingDiff{
+ PrevTableSize: vestingBefore,
+ NewTableSize: vestingAfter,
+ },
+ }
+
+ }
+
+ // output partition info
+ if err := json.NewEncoder(os.Stdout).Encode(dSummaries); err != nil {
+ return err
+ }
+ return nil
},
}
diff --git a/cmd/lotus-shed/election.go b/cmd/lotus-shed/election.go
index 7ac74de48..23c533c31 100644
--- a/cmd/lotus-shed/election.go
+++ b/cmd/lotus-shed/election.go
@@ -219,7 +219,7 @@ func backTestWinner(ctx context.Context, miner address.Address, round abi.ChainE
brand = bvals[len(bvals)-1]
}
- winner, err := gen.IsRoundWinner(ctx, ts, round, miner, brand, mbi, api)
+ winner, err := gen.IsRoundWinner(ctx, round, miner, brand, mbi, api)
if err != nil {
return nil, xerrors.Errorf("failed to check if we win next round: %w", err)
}
diff --git a/cmd/lotus-shed/gas-estimation.go b/cmd/lotus-shed/gas-estimation.go
index e02e2a722..5dc048f56 100644
--- a/cmd/lotus-shed/gas-estimation.go
+++ b/cmd/lotus-shed/gas-estimation.go
@@ -16,7 +16,6 @@ import (
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/beacon/drand"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
@@ -100,15 +99,11 @@ var gasTraceCmd = &cli.Command{
return err
}
- dcs := build.DrandConfigSchedule()
- shd := beacon.Schedule{}
- for _, dc := range dcs {
- bc, err := drand.NewDrandBeacon(MAINNET_GENESIS_TIME, build.BlockDelaySecs, nil, dc.Config)
- if err != nil {
- return xerrors.Errorf("creating drand beacon: %w", err)
- }
- shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
+ shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), MAINNET_GENESIS_TIME, nil)
+ if err != nil {
+ return err
}
+
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
@@ -200,14 +195,9 @@ var replayOfflineCmd = &cli.Command{
return err
}
- dcs := build.DrandConfigSchedule()
- shd := beacon.Schedule{}
- for _, dc := range dcs {
- bc, err := drand.NewDrandBeacon(MAINNET_GENESIS_TIME, build.BlockDelaySecs, nil, dc.Config)
- if err != nil {
- return xerrors.Errorf("creating drand beacon: %w", err)
- }
- shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
+ shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), MAINNET_GENESIS_TIME, nil)
+ if err != nil {
+ return err
}
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
diff --git a/cmd/lotus-shed/indexes.go b/cmd/lotus-shed/indexes.go
index 24a9a817f..be7d43e05 100644
--- a/cmd/lotus-shed/indexes.go
+++ b/cmd/lotus-shed/indexes.go
@@ -1,6 +1,7 @@
package main
import (
+ "context"
"database/sql"
"fmt"
"path"
@@ -8,12 +9,18 @@ import (
"strings"
"github.com/mitchellh/go-homedir"
+ "github.com/multiformats/go-varint"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
+ "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
+ builtintypes "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/crypto"
+ "github.com/filecoin-project/go-state-types/exitcode"
+ lapi "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes"
lcli "github.com/filecoin-project/lotus/cli"
)
@@ -31,6 +38,291 @@ var indexesCmd = &cli.Command{
withCategory("msgindex", backfillMsgIndexCmd),
withCategory("msgindex", pruneMsgIndexCmd),
withCategory("txhash", backfillTxHashCmd),
+ withCategory("events", backfillEventsCmd),
+ },
+}
+
+var backfillEventsCmd = &cli.Command{
+ Name: "backfill-events",
+ Usage: "Backfill the events.db for a number of epochs starting from a specified height",
+ Flags: []cli.Flag{
+ &cli.UintFlag{
+ Name: "from",
+ Value: 0,
+ Usage: "the tipset height to start backfilling from (0 is head of chain)",
+ },
+ &cli.IntFlag{
+ Name: "epochs",
+ Value: 2000,
+ Usage: "the number of epochs to backfill",
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ srv, err := lcli.GetFullNodeServices(cctx)
+ if err != nil {
+ return err
+ }
+ defer srv.Close() //nolint:errcheck
+
+ api := srv.FullNodeAPI()
+ ctx := lcli.ReqContext(cctx)
+
+ // currTs will be the tipset where we start backfilling from
+ currTs, err := api.ChainHead(ctx)
+ if err != nil {
+ return err
+ }
+ if cctx.IsSet("from") {
+ // we need to fetch the tipset after the epoch being specified since we will need to advance currTs
+ currTs, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Int("from")+1), currTs.Key())
+ if err != nil {
+ return err
+ }
+ }
+
+ // advance currTs by one epoch and maintain prevTs as the previous tipset (this allows us to easily use the ChainGetParentMessages/Receipt API)
+ prevTs := currTs
+ currTs, err = api.ChainGetTipSet(ctx, currTs.Parents())
+ if err != nil {
+ return fmt.Errorf("failed to load tipset %s: %w", prevTs.Parents(), err)
+ }
+
+ epochs := cctx.Int("epochs")
+
+ basePath, err := homedir.Expand(cctx.String("repo"))
+ if err != nil {
+ return err
+ }
+
+ dbPath := path.Join(basePath, "sqlite", "events.db")
+ db, err := sql.Open("sqlite3", dbPath)
+ if err != nil {
+ return err
+ }
+
+ defer func() {
+ err := db.Close()
+ if err != nil {
+ fmt.Printf("ERROR: closing db: %s", err)
+ }
+ }()
+
+ addressLookups := make(map[abi.ActorID]address.Address)
+
+ resolveFn := func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) {
+ // we only want to match using f4 addresses
+ idAddr, err := address.NewIDAddress(uint64(emitter))
+ if err != nil {
+ return address.Undef, false
+ }
+
+ actor, err := api.StateGetActor(ctx, idAddr, ts.Key())
+ if err != nil || actor.Address == nil {
+ return address.Undef, false
+ }
+
+ // if robust address is not f4 then we won't match against it so bail early
+ if actor.Address.Protocol() != address.Delegated {
+ return address.Undef, false
+ }
+
+ // we have an f4 address, make sure it's assigned by the EAM
+ if namespace, _, err := varint.FromUvarint(actor.Address.Payload()); err != nil || namespace != builtintypes.EthereumAddressManagerActorID {
+ return address.Undef, false
+ }
+ return *actor.Address, true
+ }
+
+ isIndexedValue := func(b uint8) bool {
+ // currently we mark the full entry as indexed if either the key
+ // or the value are indexed; in the future we will need finer-grained
+ // management of indices
+ return b&(types.EventFlagIndexedKey|types.EventFlagIndexedValue) > 0
+ }
+
+ var totalEventsAffected int64
+ var totalEntriesAffected int64
+
+ processHeight := func(ctx context.Context, cnt int, msgs []lapi.Message, receipts []*types.MessageReceipt) error {
+ tx, err := db.BeginTx(ctx, nil)
+ if err != nil {
+ return fmt.Errorf("failed to start transaction: %w", err)
+ }
+ defer tx.Rollback() //nolint:errcheck
+
+ stmtSelectEvent, err := tx.Prepare("SELECT MAX(id) from event WHERE height=? AND tipset_key=? and tipset_key_cid=? and emitter_addr=? and event_index=? and message_cid=? and message_index=? and reverted=false")
+ if err != nil {
+ return err
+ }
+ stmtEvent, err := tx.Prepare("INSERT INTO event (height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) VALUES(?, ?, ?, ?, ?, ?, ?, ?)")
+ if err != nil {
+ return err
+ }
+ stmtEntry, err := tx.Prepare("INSERT INTO event_entry(event_id, indexed, flags, key, codec, value) VALUES(?, ?, ?, ?, ?, ?)")
+ if err != nil {
+ return err
+ }
+
+ var eventsAffected int64
+ var entriesAffected int64
+
+ // loop over each message receipt and backfill the events
+ for idx, receipt := range receipts {
+ msg := msgs[idx]
+
+ if receipt.ExitCode != exitcode.Ok {
+ continue
+ }
+
+ if receipt.EventsRoot == nil {
+ continue
+ }
+
+ events, err := api.ChainGetEvents(ctx, *receipt.EventsRoot)
+ if err != nil {
+ return fmt.Errorf("failed to load events for tipset %s: %w", currTs, err)
+ }
+
+ for eventIdx, event := range events {
+ addr, found := addressLookups[event.Emitter]
+ if !found {
+ var ok bool
+ addr, ok = resolveFn(ctx, event.Emitter, currTs)
+ if !ok {
+ // not an address we will be able to match against
+ continue
+ }
+ addressLookups[event.Emitter] = addr
+ }
+
+ tsKeyCid, err := currTs.Key().Cid()
+ if err != nil {
+ return fmt.Errorf("failed to get tipset key cid: %w", err)
+ }
+
+ // select the highest event id that exists in database, or null if none exists
+ var entryID sql.NullInt64
+ err = stmtSelectEvent.QueryRow(
+ currTs.Height(),
+ currTs.Key().Bytes(),
+ tsKeyCid.Bytes(),
+ addr.Bytes(),
+ eventIdx,
+ msg.Cid.Bytes(),
+ idx,
+ ).Scan(&entryID)
+ if err != nil {
+ return fmt.Errorf("error checking if event exists: %w", err)
+ }
+
+ // we already have this event
+ if entryID.Valid {
+ continue
+ }
+
+ // event does not exist, lets backfill it
+ res, err := tx.Stmt(stmtEvent).Exec(
+ currTs.Height(), // height
+ currTs.Key().Bytes(), // tipset_key
+ tsKeyCid.Bytes(), // tipset_key_cid
+ addr.Bytes(), // emitter_addr
+ eventIdx, // event_index
+ msg.Cid.Bytes(), // message_cid
+ idx, // message_index
+ false, // reverted
+ )
+ if err != nil {
+ return fmt.Errorf("error inserting event: %w", err)
+ }
+
+ entryID.Int64, err = res.LastInsertId()
+ if err != nil {
+ return fmt.Errorf("could not get last insert id: %w", err)
+ }
+
+ rowsAffected, err := res.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("could not get rows affected: %w", err)
+ }
+ eventsAffected += rowsAffected
+
+ // backfill the event entries
+ for _, entry := range event.Entries {
+ entryRes, err := tx.Stmt(stmtEntry).Exec(
+ entryID.Int64, // event_id
+ isIndexedValue(entry.Flags), // indexed
+ []byte{entry.Flags}, // flags
+ entry.Key, // key
+ entry.Codec, // codec
+ entry.Value, // value
+ )
+ if err != nil {
+ return fmt.Errorf("error inserting entry: %w", err)
+ }
+
+ rowsAffected, err := entryRes.RowsAffected()
+ if err != nil {
+ return fmt.Errorf("could not get rows affected: %w", err)
+ }
+ entriesAffected += rowsAffected
+ }
+ }
+ }
+
+ err = tx.Commit()
+ if err != nil {
+ return fmt.Errorf("failed to commit transaction: %w", err)
+ }
+
+ log.Infof("[%d] backfilling actor events epoch:%d, eventsAffected:%d, entriesAffected:%d", cnt, currTs.Height(), eventsAffected, entriesAffected)
+
+ totalEventsAffected += eventsAffected
+ totalEntriesAffected += entriesAffected
+
+ return nil
+ }
+
+ for i := 0; i < epochs; i++ {
+ select {
+ case <-ctx.Done():
+ return nil
+ default:
+ }
+
+ blockCid := prevTs.Blocks()[0].Cid()
+
+ // get messages for the parent of the previous tipset (which will be currTs)
+ msgs, err := api.ChainGetParentMessages(ctx, blockCid)
+ if err != nil {
+ return fmt.Errorf("failed to get parent messages for block %s: %w", blockCid, err)
+ }
+
+ // get receipts for the parent of the previous tipset (which will be currTs)
+ receipts, err := api.ChainGetParentReceipts(ctx, blockCid)
+ if err != nil {
+ return fmt.Errorf("failed to get parent receipts for block %s: %w", blockCid, err)
+ }
+
+ if len(msgs) != len(receipts) {
+ return fmt.Errorf("mismatch in message and receipt count: %d != %d", len(msgs), len(receipts))
+ }
+
+ err = processHeight(ctx, i, msgs, receipts)
+ if err != nil {
+ return err
+ }
+
+ // advance prevTs and currTs up the chain
+ prevTs = currTs
+ currTs, err = api.ChainGetTipSet(ctx, currTs.Parents())
+ if err != nil {
+ return fmt.Errorf("failed to load tipset %s: %w", currTs, err)
+ }
+ }
+
+ log.Infof("backfilling events complete, totalEventsAffected:%d, totalEntriesAffected:%d", totalEventsAffected, totalEntriesAffected)
+
+ return nil
},
}
diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go
index 13ab6af0d..aab3d1ef3 100644
--- a/cmd/lotus-shed/main.go
+++ b/cmd/lotus-shed/main.go
@@ -23,6 +23,7 @@ func main() {
local := []*cli.Command{
addressCmd,
statActorCmd,
+ statSnapshotCmd,
statObjCmd,
base64Cmd,
base32Cmd,
@@ -89,6 +90,7 @@ func main() {
indexesCmd,
FevmAnalyticsCmd,
mismatchesCmd,
+ blockCmd,
}
app := &cli.App{
diff --git a/cmd/lotus-shed/msg.go b/cmd/lotus-shed/msg.go
index 062e077df..35f8eed35 100644
--- a/cmd/lotus-shed/msg.go
+++ b/cmd/lotus-shed/msg.go
@@ -26,6 +26,12 @@ var msgCmd = &cli.Command{
Aliases: []string{"msg"},
Usage: "Translate message between various formats",
ArgsUsage: "Message in any form",
+ Flags: []cli.Flag{
+ &cli.BoolFlag{
+ Name: "exec-trace",
+ Usage: "Print the execution trace",
+ },
+ },
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 1 {
return lcli.IncorrectNumArgs(cctx)
@@ -36,6 +42,48 @@ var msgCmd = &cli.Command{
return err
}
+ api, closer, err := lcli.GetFullNodeAPI(cctx)
+ if err != nil {
+ return err
+ }
+ defer closer()
+
+ ctx := lcli.ReqContext(cctx)
+
+ // Get the CID of the message
+ mcid := msg.Cid()
+
+ // Search for the message on-chain
+ lookup, err := api.StateSearchMsg(ctx, mcid)
+ if err != nil {
+ return err
+ }
+ if lookup == nil {
+ fmt.Println("Message not found on-chain. Continuing...")
+ } else {
+ // Replay the message to get the execution trace
+ res, err := api.StateReplay(ctx, types.EmptyTSK, mcid)
+ if err != nil {
+ return xerrors.Errorf("replay call failed: %w", err)
+ }
+
+ if cctx.Bool("exec-trace") {
+ // Print the execution trace
+ color.Green("Execution trace:")
+ trace, err := json.MarshalIndent(res.ExecutionTrace, "", " ")
+ if err != nil {
+ return xerrors.Errorf("marshaling execution trace: %w", err)
+ }
+ fmt.Println(string(trace))
+ fmt.Println()
+
+ color.Green("Receipt:")
+ fmt.Printf("Exit code: %d\n", res.MsgRct.ExitCode)
+ fmt.Printf("Return: %x\n", res.MsgRct.Return)
+ fmt.Printf("Gas Used: %d\n", res.MsgRct.GasUsed)
+ }
+ }
+
switch msg := msg.(type) {
case *types.SignedMessage:
return printSignedMessage(cctx, msg)
diff --git a/cmd/lotus-shed/state-stats.go b/cmd/lotus-shed/state-stats.go
index 4aec02091..4eb00f981 100644
--- a/cmd/lotus-shed/state-stats.go
+++ b/cmd/lotus-shed/state-stats.go
@@ -1,11 +1,14 @@
package main
import (
+ "bytes"
"context"
"encoding/json"
"fmt"
"io"
+ "path"
"reflect"
+ "sort"
"sync"
"github.com/docker/go-units"
@@ -21,8 +24,12 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
+ gstactors "github.com/filecoin-project/go-state-types/actors"
+ "github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/blockstore"
+ "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
@@ -49,6 +56,19 @@ type fieldItem struct {
Stats api.ObjStat
}
+type job struct {
+ c cid.Cid
+ key string // prefix path for the region being recorded i.e. "/state/mineractor"
+}
+type cidCall struct {
+ c cid.Cid
+ resp chan bool
+}
+type result struct {
+ key string
+ stats api.ObjStat
+}
+
type cacheNodeGetter struct {
ds format.NodeGetter
cache *lru.TwoQueueCache[cid.Cid, format.Node]
@@ -166,39 +186,13 @@ var statObjCmd = &cli.Command{
return err
}
- r, err := repo.NewFS(cctx.String("repo"))
- if err != nil {
- return xerrors.Errorf("opening fs repo: %w", err)
- }
-
- exists, err := r.Exists()
+ h, err := loadChainStore(ctx, cctx.String("repo"))
if err != nil {
return err
}
- if !exists {
- return xerrors.Errorf("lotus repo doesn't exist")
- }
+ defer h.closer()
- lr, err := r.Lock(repo.FullNode)
- if err != nil {
- return err
- }
- defer lr.Close() //nolint:errcheck
-
- bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
- if err != nil {
- return fmt.Errorf("failed to open blockstore: %w", err)
- }
-
- defer func() {
- if c, ok := bs.(io.Closer); ok {
- if err := c.Close(); err != nil {
- log.Warnf("failed to close blockstore: %s", err)
- }
- }
- }()
-
- dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
+ dag := merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs)))
dsc := &dagStatCollector{
ds: dag,
walk: carWalkFunc,
@@ -212,6 +206,376 @@ var statObjCmd = &cli.Command{
},
}
+type StoreHandle struct {
+ bs blockstore.Blockstore
+ cs *store.ChainStore
+ sm *stmgr.StateManager
+ closer func()
+}
+
+func loadChainStore(ctx context.Context, repoPath string) (*StoreHandle, error) {
+ r, err := repo.NewFS(repoPath)
+ if err != nil {
+ return nil, xerrors.Errorf("opening fs repo: %w", err)
+ }
+
+ exists, err := r.Exists()
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, xerrors.Errorf("lotus repo doesn't exist")
+ }
+
+ lr, err := r.Lock(repo.FullNode)
+ if err != nil {
+ return nil, err
+ }
+
+ bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open blockstore: %w", err)
+ }
+
+ closer := func() {
+ if err := lr.Close(); err != nil {
+ log.Warnf("failed to close locked repo: %s", err)
+ }
+ if c, ok := bs.(io.Closer); ok {
+ if err := c.Close(); err != nil {
+ log.Warnf("failed to close blockstore: %s", err)
+ }
+ }
+ }
+
+ mds, err := lr.Datastore(context.Background(), "/metadata")
+ if err != nil {
+ return nil, err
+ }
+
+ cs := store.NewChainStore(bs, bs, mds, nil, nil)
+ if err := cs.Load(ctx); err != nil {
+ return nil, fmt.Errorf("failed to load chain store: %w", err)
+ }
+
+ tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc)
+ sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open state manager: %w", err)
+ }
+ handle := StoreHandle{
+ bs: bs,
+ sm: sm,
+ cs: cs,
+ closer: closer,
+ }
+
+ return &handle, nil
+}
+
+func pipeline(ctx context.Context, name string, numWorkers int, createJobs func(ctx context.Context, jobCh chan job, resultCh chan result) error,
+ worker func(ctx context.Context, id int, jobCh chan job, resultCh chan result) error,
+ processResults func(ctx context.Context, resultCh chan result) error) error {
+
+ eg, egctx := errgroup.WithContext(ctx)
+ jobCh := make(chan job, numWorkers)
+ resultCh := make(chan result)
+ var resultWriterWg sync.WaitGroup
+
+ resultWriterWg.Add(1)
+ eg.Go(func() error {
+ defer resultWriterWg.Done()
+ defer func() {
+ close(jobCh)
+ }()
+ return createJobs(ctx, jobCh, resultCh)
+ })
+
+ var id int
+ for w := 0; w < numWorkers; w++ {
+ id = w
+
+ resultWriterWg.Add(1)
+ eg.Go(func() error {
+ defer resultWriterWg.Done()
+ return worker(egctx, id, jobCh, resultCh)
+ })
+ }
+
+ eg.Go(func() error {
+ return processResults(ctx, resultCh)
+ })
+
+ // close result channel when workers are done sending to it.
+ eg.Go(func() error {
+ resultWriterWg.Wait()
+ close(resultCh)
+ return nil
+ })
+
+ if err := eg.Wait(); err != nil {
+ return fmt.Errorf("failed pipeline %s: %w", name, err)
+ }
+ return nil
+}
+
+var statSnapshotCmd = &cli.Command{
+ Name: "stat-snapshot",
+ Usage: "calculates the space usage of a snapshot taken from the given tipset",
+ Description: `Walk the chain back to the lightweight snapshot depth and break down space usage into high level
+ categories: headers, messages, receipts, latest state root, and churn from earlier state roots.
+ State root and churn space is further broken down by actor type and immediate top level fields
+ `,
+ Flags: []cli.Flag{
+ &cli.StringFlag{
+ Name: "tipset",
+ Usage: "specify tipset to take stats from (pass comma separated array of cids)",
+ },
+ &cli.IntFlag{
+ Name: "workers",
+ Usage: "number of workers to use when processing",
+ Value: 10,
+ },
+ &cli.IntFlag{
+ Name: "dag-cache-size",
+ Usage: "cache size per worker (setting to 0 disables)",
+ Value: 8092,
+ },
+ &cli.BoolFlag{
+ Name: "pretty",
+ Usage: "print formatted output instead of ldjson",
+ Value: false,
+ },
+ },
+ Action: func(cctx *cli.Context) error {
+ ctx := lcli.ReqContext(cctx)
+ h, err := loadChainStore(ctx, cctx.String("repo"))
+ if err != nil {
+ return err
+ }
+ defer h.closer()
+ tsr := &ChainStoreTipSetResolver{
+ Chain: h.cs,
+ }
+
+ ts, err := lcli.LoadTipSet(ctx, cctx, tsr)
+ if err != nil {
+ return err
+ }
+
+ numWorkers := cctx.Int("workers")
+ dagCacheSize := cctx.Int("dag-cache-size")
+
+ cidCh := make(chan cidCall, numWorkers)
+ summary := make(map[string]api.ObjStat)
+ // snapshot root objects with no additional bytes or links
+ summary["/"] = api.ObjStat{Size: 0, Links: 0}
+ summary["/statetree"] = api.ObjStat{Size: 0, Links: 0}
+
+ combine := func(statsA, statsB api.ObjStat) api.ObjStat {
+ return api.ObjStat{
+ Size: statsA.Size + statsB.Size,
+ Links: statsA.Links + statsB.Links,
+ }
+ }
+
+ // Threadsafe cid set lives across different pipelines so not part of error group
+ go func() {
+ seen := cid.NewSet()
+ for {
+ select {
+ case call := <-cidCh:
+ call.resp <- seen.Visit(call.c)
+ case <-ctx.Done():
+ log.Infof("shutting down cid set goroutine: %s", ctx.Err())
+ return
+ }
+ }
+ }()
+ visit := func(c cid.Cid) bool {
+ ch := make(chan bool)
+ cidCh <- cidCall{c: c, resp: ch}
+ out := <-ch
+ return out
+ }
+ // Stage 1 walk all actors in latest state root
+ createJobsStage1 := func(ctx context.Context, jobCh chan job, _ chan result) error {
+ st, err := h.sm.StateTree(ts.ParentState())
+ if err != nil {
+ return err
+ }
+
+ return st.ForEach(func(_ address.Address, act *types.Actor) error {
+ actType := builtin.ActorNameByCode(act.Code)
+ actType = path.Base(actType) // strip away fil/
+ if actType == "" {
+ actType = act.Code.String()
+ }
+ jobCh <- job{c: act.Head, key: fmt.Sprintf("/statetree/latest/%s", actType)}
+
+ return nil
+ })
+ }
+
+ worker := func(ctx context.Context, id int, jobCh chan job, resultCh chan result) error {
+ var dag format.NodeGetter = merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs)))
+ if dagCacheSize != 0 {
+ var err error
+ dag, err = newCacheNodeGetter(merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs))), dagCacheSize)
+ if err != nil {
+ return err
+ }
+ }
+
+ for job := range jobCh {
+ stats, err := collectSnapshotJobStats(ctx, job, dag, visit)
+ if err != nil {
+ return err
+ }
+ for _, stat := range stats {
+ select {
+ case resultCh <- stat:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+ }
+ return nil
+ }
+
+ processResults := func(ctx context.Context, resultCh chan result) error {
+ for result := range resultCh {
+ if stat, ok := summary[result.key]; ok {
+ summary[result.key] = combine(stat, result.stats)
+
+ } else {
+ summary[result.key] = result.stats
+ }
+ }
+ return nil
+ }
+
+ if err := pipeline(ctx, "Latest State Actors", numWorkers, createJobsStage1, worker, processResults); err != nil {
+ return err
+ }
+
+ // Stage 2: walk the top of the latest state root
+ createJobsStage2 := func(ctx context.Context, jobCh chan job, _ chan result) error {
+ jobCh <- job{c: ts.ParentState(), key: "/statetree/latest"}
+ return nil
+ }
+
+ if err := pipeline(ctx, "Latest State HAMT", numWorkers, createJobsStage2, worker, processResults); err != nil {
+ return err
+ }
+
+ // Stage 3 walk the rest of the chain: headers, messages, churn
+ // ordering:
+ // for each header send jobs for messages, receipts, state tree churn
+ // don't walk header directly as it would just walk everything including parent tipsets
+
+ churnStateRoots := cid.NewSet()
+ createJobsStage3 := func(ctx context.Context, jobCh chan job, resultCh chan result) error {
+ // walk chain
+ var dag format.NodeGetter = merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs)))
+ if dagCacheSize != 0 {
+ var err error
+ dag, err = newCacheNodeGetter(merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs))), dagCacheSize)
+ if err != nil {
+ return err
+ }
+ }
+
+ blocksToWalk := ts.Cids()
+ startHeight := ts.Height()
+ snapshotStateLimit := abi.ChainEpoch(2000)
+
+ churnActorCache := cid.NewSet()
+ blocksTracked := cid.NewSet()
+ for len(blocksToWalk) > 0 {
+ blkCid := blocksToWalk[0]
+ blocksToWalk = blocksToWalk[1:]
+ nd, err := dag.Get(ctx, blkCid)
+ if err != nil {
+ return xerrors.Errorf("getting block: %w", err)
+ }
+
+ var b types.BlockHeader
+ if err := b.UnmarshalCBOR(bytes.NewBuffer(nd.RawData())); err != nil {
+ return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blkCid, err)
+ }
+
+ // header directly to result channel
+ resultCh <- result{key: "/headers", stats: api.ObjStat{Size: uint64(len(nd.RawData())), Links: uint64(len(nd.Links()))}}
+ // message job
+ if b.Height > startHeight-snapshotStateLimit {
+ jobCh <- job{key: "/messages", c: b.Messages}
+ }
+
+ // state churn job
+ if b.Height > startHeight-snapshotStateLimit {
+ if churnStateRoots.Visit(b.ParentStateRoot) {
+ st, err := h.sm.StateTree(b.ParentStateRoot)
+ if err != nil {
+ return err
+ }
+
+ err = st.ForEach(func(_ address.Address, act *types.Actor) error {
+ if churnActorCache.Visit(act.Head) {
+ actType := builtin.ActorNameByCode(act.Code)
+ actType = path.Base(actType) // strip away fil/
+ if actType == "" {
+ actType = act.Code.String()
+ }
+ jobCh <- job{c: act.Head, key: fmt.Sprintf("/statetree/churn/%s", actType)}
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ }
+ }
+ for _, blkCid := range b.Parents {
+ if blocksTracked.Visit(blkCid) && b.Height != 0 {
+ blocksToWalk = append(blocksToWalk, blkCid)
+ }
+ }
+ }
+ return nil
+ }
+
+ if err := pipeline(ctx, "Churn, Headers, Messages", numWorkers, createJobsStage3, worker, processResults); err != nil {
+ return err
+ }
+
+ // TODO: clean this up and extract a reusable abstraction for the stages above
+ // Stage 4 walk all actor HAMTs for churn
+
+ createJobsStage4 := func(ctx context.Context, jobCh chan job, _ chan result) error {
+ return churnStateRoots.ForEach(func(c cid.Cid) error {
+ jobCh <- job{c: c, key: "/statetree/churn"}
+ return nil
+ })
+ }
+
+ if err := pipeline(ctx, "Churn HAMT", numWorkers, createJobsStage4, worker, processResults); err != nil {
+ return err
+ }
+
+ if cctx.Bool("pretty") {
+ DumpSnapshotStats(summary)
+ } else {
+ if err := DumpJSON(summary); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ },
+}
+
var statActorCmd = &cli.Command{
Name: "stat-actor",
Usage: "calculates the size of actors and their immeidate structures",
@@ -265,57 +629,14 @@ to reduce the number of decode operations performed by caching the decoded objec
addrs = append(addrs, addr)
}
}
-
- r, err := repo.NewFS(cctx.String("repo"))
- if err != nil {
- return xerrors.Errorf("opening fs repo: %w", err)
- }
-
- exists, err := r.Exists()
- if err != nil {
- return err
- }
- if !exists {
- return xerrors.Errorf("lotus repo doesn't exist")
- }
-
- lr, err := r.Lock(repo.FullNode)
- if err != nil {
- return err
- }
- defer lr.Close() //nolint:errcheck
-
- bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
- if err != nil {
- return fmt.Errorf("failed to open blockstore: %w", err)
- }
-
- defer func() {
- if c, ok := bs.(io.Closer); ok {
- if err := c.Close(); err != nil {
- log.Warnf("failed to close blockstore: %s", err)
- }
- }
- }()
-
- mds, err := lr.Datastore(context.Background(), "/metadata")
- if err != nil {
- return err
- }
-
- cs := store.NewChainStore(bs, bs, mds, nil, nil)
- if err := cs.Load(ctx); err != nil {
- return nil
- }
-
- tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc)
- sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
+ h, err := loadChainStore(ctx, cctx.String("repo"))
if err != nil {
return err
}
+ defer h.closer()
tsr := &ChainStoreTipSetResolver{
- Chain: cs,
+ Chain: h.cs,
}
ts, err := lcli.LoadTipSet(ctx, cctx, tsr)
@@ -327,7 +648,7 @@ to reduce the number of decode operations performed by caching the decoded objec
if len(addrs) == 0 && cctx.Bool("all") {
var err error
- addrs, err = sm.ListAllActors(ctx, ts)
+ addrs, err = h.sm.ListAllActors(ctx, ts)
if err != nil {
return err
}
@@ -354,15 +675,15 @@ to reduce the number of decode operations performed by caching the decoded objec
return nil
}
- actor, err := sm.LoadActor(ctx, addr, ts)
+ actor, err := h.sm.LoadActor(ctx, addr, ts)
if err != nil {
return err
}
- var dag format.NodeGetter = merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
+ var dag format.NodeGetter = merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs)))
if dagCacheSize != 0 {
var err error
- dag, err = newCacheNodeGetter(merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))), dagCacheSize)
+ dag, err = newCacheNodeGetter(merkledag.NewDAGService(blockservice.New(h.bs, offline.Exchange(h.bs))), dagCacheSize)
if err != nil {
return err
}
@@ -427,6 +748,93 @@ to reduce the number of decode operations performed by caching the decoded objec
},
}
+func collectSnapshotJobStats(ctx context.Context, in job, dag format.NodeGetter, visit func(c cid.Cid) bool) ([]result, error) {
+ // "/statetree/latest" and "/statetree/churn" keys attempt further breakdown by actor type
+ if !(path.Dir(in.key) == "/statetree/latest") && !(path.Dir(in.key) == "/statetree/churn") {
+ dsc := &dagStatCollector{
+ ds: dag,
+ walk: carWalkFunc,
+ }
+
+ if err := merkledag.Walk(ctx, dsc.walkLinks, in.c, visit, merkledag.Concurrent()); err != nil {
+ return nil, err
+ }
+ return []result{{key: in.key, stats: dsc.stats}}, nil
+ }
+
+ // in.c is an actor head cid, try to unmarshal and create sub keys for different regions of state
+ nd, err := dag.Get(ctx, in.c)
+ if err != nil {
+ return nil, err
+ }
+ subjobs := make([]job, 0)
+ results := make([]result, 0)
+
+ // reconstruct actor for state parsing from key
+ av, err := gstactors.VersionForNetwork(network.Version20)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get actors version for network: %w", err)
+ }
+ code, ok := actors.GetActorCodeID(av, path.Base(in.key))
+ if !ok { // try parsing key directly
+ code, err = cid.Parse(path.Base(in.key))
+ if err != nil {
+ log.Debugf("failed to parse actor string: %s", path.Base(in.key))
+ }
+ }
+
+ actor := types.ActorV5{Head: in.c, Code: code}
+ oif, err := vm.DumpActorState(consensus.NewTipSetExecutor(filcns.RewardFunc).NewActorRegistry(), &actor, nd.RawData())
+ if err != nil {
+ oif = nil
+ }
+ // Account actors return nil from DumpActorState as they have no state
+ if oif != nil {
+ v := reflect.Indirect(reflect.ValueOf(oif))
+ for i := 0; i < v.NumField(); i++ {
+ varName := v.Type().Field(i).Name
+ varType := v.Type().Field(i).Type
+ varValue := v.Field(i).Interface()
+
+ if varType == reflect.TypeOf(cid.Cid{}) {
+ subjobs = append(subjobs, job{
+ key: fmt.Sprintf("%s/%s", in.key, varName),
+ c: varValue.(cid.Cid),
+ })
+ }
+ }
+ }
+
+ // Walk subfields
+ for _, job := range subjobs {
+ dsc := &dagStatCollector{
+ ds: dag,
+ walk: carWalkFunc,
+ }
+
+ if err := merkledag.Walk(ctx, dsc.walkLinks, job.c, visit, merkledag.Concurrent()); err != nil {
+ return nil, err
+ }
+ var res result
+ res.key = job.key
+ res.stats = dsc.stats
+
+ results = append(results, res)
+ }
+
+ // now walk the top level object of actor state
+ dsc := &dagStatCollector{
+ ds: dag,
+ walk: carWalkFunc,
+ }
+
+ if err := merkledag.Walk(ctx, dsc.walkLinks, in.c, visit, merkledag.Concurrent()); err != nil {
+ return nil, err
+ }
+ results = append(results, result{key: in.key, stats: dsc.stats})
+ return results, nil
+}
+
func collectStats(ctx context.Context, addr address.Address, actor *types.Actor, dag format.NodeGetter) (actorStats, error) {
log.Infow("actor", "addr", addr, "code", actor.Code, "name", builtin.ActorNameByCode(actor.Code))
@@ -532,3 +940,19 @@ func DumpStats(actStats actorStats) {
fmt.Println("--------------------------------------------------------------------------")
}
+
+func DumpSnapshotStats(stats map[string]api.ObjStat) {
+ // sort keys so we get subkey locality
+ keys := make([]string, 0, len(stats))
+ for k := range stats {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ fmt.Printf("%-*s%-*s%-*s\n", 32, "Path", 24, "Size", 24, "\"Blocks\"")
+ for _, k := range keys {
+ stat := stats[k]
+ sizeStr := units.BytesSize(float64(stat.Size))
+ fmt.Printf("%-*s%-*s%-*s%-*d\n", 32, k, 10, sizeStr, 14, fmt.Sprintf("(%d)", stat.Size), 24, stat.Links)
+ }
+}
diff --git a/cmd/lotus-shed/stateroot-stats.go b/cmd/lotus-shed/stateroot-stats.go
index f429c4e64..16dfc5935 100644
--- a/cmd/lotus-shed/stateroot-stats.go
+++ b/cmd/lotus-shed/stateroot-stats.go
@@ -197,7 +197,7 @@ var staterootStatCmd = &cli.Command{
return err
}
- fmt.Printf("%s\t%s\t%d\n", inf.Addr, string(cmh.Digest), inf.Stat.Size)
+ fmt.Printf("%s\t%x\t%d\n", inf.Addr, cmh.Digest, inf.Stat.Size)
}
return nil
},
diff --git a/cmd/lotus-worker/main.go b/cmd/lotus-worker/main.go
index 944791275..6ad3a448e 100644
--- a/cmd/lotus-worker/main.go
+++ b/cmd/lotus-worker/main.go
@@ -8,12 +8,14 @@ import (
"net/http"
"os"
"path/filepath"
+ "reflect"
"strings"
"time"
"github.com/google/uuid"
"github.com/ipfs/go-datastore/namespace"
logging "github.com/ipfs/go-log/v2"
+ "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/urfave/cli/v2"
"go.opencensus.io/stats/view"
@@ -320,6 +322,29 @@ var runCmd = &cli.Command{
}
}
+ // Check DC-environment variable
+ sectorSizes := []string{"2KiB", "8MiB", "512MiB", "32GiB", "64GiB"}
+ resourcesType := reflect.TypeOf(storiface.Resources{})
+
+ for _, sectorSize := range sectorSizes {
+ for i := 0; i < resourcesType.NumField(); i++ {
+ field := resourcesType.Field(i)
+ envName := field.Tag.Get("envname")
+ if envName != "" {
+ // Check if DC_[SectorSize]_[ResourceRestriction] is set
+ envVar, ok := os.LookupEnv("DC_" + sectorSize + "_" + envName)
+ if ok {
+ // If it is set, convert it to DC_[ResourceRestriction]
+ err := os.Setenv("DC_"+envName, envVar)
+ if err != nil {
+ log.Fatalf("Error setting environment variable: %v", err)
+ }
+ log.Warnf("Converted DC_%s_%s to DC_%s, because DC is a sector-size independent job", sectorSize, envName, envName)
+ }
+ }
+ }
+ }
+
// Connect to storage-miner
ctx := lcli.ReqContext(cctx)
@@ -530,9 +555,14 @@ var runCmd = &cli.Command{
log.Info("Opening local storage; connecting to master")
const unspecifiedAddress = "0.0.0.0"
+
address := cctx.String("listen")
- addressSlice := strings.Split(address, ":")
- if ip := net.ParseIP(addressSlice[0]); ip != nil {
+ host, port, err := net.SplitHostPort(address)
+ if err != nil {
+ return err
+ }
+
+ if ip := net.ParseIP(host); ip != nil {
if ip.String() == unspecifiedAddress {
timeout, err := time.ParseDuration(cctx.String("timeout"))
if err != nil {
@@ -542,11 +572,21 @@ var runCmd = &cli.Command{
if err != nil {
return err
}
- address = rip + ":" + addressSlice[1]
+ host = rip
}
}
- localStore, err := paths.NewLocal(ctx, lr, nodeApi, []string{"http://" + address + "/remote"})
+ var newAddress string
+
+ // Check if the IP address is IPv6
+ ip := net.ParseIP(host)
+ if ip.To4() == nil && ip.To16() != nil {
+ newAddress = "[" + host + "]:" + port
+ } else {
+ newAddress = host + ":" + port
+ }
+
+ localStore, err := paths.NewLocal(ctx, lr, nodeApi, []string{"http://" + newAddress + "/remote"})
if err != nil {
return err
}
@@ -740,21 +780,48 @@ func extractRoutableIP(timeout time.Duration) (string, error) {
deprecatedMinerMultiAddrKey := "STORAGE_API_INFO"
env, ok := os.LookupEnv(minerMultiAddrKey)
if !ok {
- // TODO remove after deprecation period
_, ok = os.LookupEnv(deprecatedMinerMultiAddrKey)
if ok {
log.Warnf("Using a deprecated env(%s) value, please use env(%s) instead.", deprecatedMinerMultiAddrKey, minerMultiAddrKey)
}
return "", xerrors.New("MINER_API_INFO environment variable required to extract IP")
}
- minerAddr := strings.Split(env, "/")
- conn, err := net.DialTimeout("tcp", minerAddr[2]+":"+minerAddr[4], timeout)
+
+ // Splitting the env to separate the JWT from the multiaddress
+ splitEnv := strings.SplitN(env, ":", 2)
+ if len(splitEnv) < 2 {
+ return "", xerrors.Errorf("invalid MINER_API_INFO format")
+ }
+ // Only take the multiaddress part
+ maddrStr := splitEnv[1]
+
+ maddr, err := multiaddr.NewMultiaddr(maddrStr)
if err != nil {
return "", err
}
- defer conn.Close() //nolint:errcheck
+
+ minerIP, _ := maddr.ValueForProtocol(multiaddr.P_IP6)
+ minerPort, _ := maddr.ValueForProtocol(multiaddr.P_TCP)
+
+ // Check if the IP is IPv6 and format the address appropriately
+ var addressToDial string
+ if ip := net.ParseIP(minerIP); ip.To4() == nil && ip.To16() != nil {
+ addressToDial = "[" + minerIP + "]:" + minerPort
+ } else {
+ addressToDial = minerIP + ":" + minerPort
+ }
+
+ conn, err := net.DialTimeout("tcp", addressToDial, timeout)
+ if err != nil {
+ return "", err
+ }
+
+ defer func() {
+ if cerr := conn.Close(); cerr != nil {
+ log.Errorf("Error closing connection: %v", cerr)
+ }
+ }()
localAddr := conn.LocalAddr().(*net.TCPAddr)
-
- return strings.Split(localAddr.IP.String(), ":")[0], nil
+ return localAddr.IP.String(), nil
}
diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go
index 106446c0a..7271a6e53 100644
--- a/cmd/lotus/daemon.go
+++ b/cmd/lotus/daemon.go
@@ -12,16 +12,14 @@ import (
"io"
"os"
"path"
+ "path/filepath"
"runtime/pprof"
"strings"
"github.com/DataDog/zstd"
- "github.com/ipfs/go-cid"
- levelds "github.com/ipfs/go-ds-leveldb"
metricsprom "github.com/ipfs/go-metrics-prometheus"
"github.com/mitchellh/go-homedir"
"github.com/multiformats/go-multiaddr"
- ldbopts "github.com/syndtr/goleveldb/leveldb/opt"
"github.com/urfave/cli/v2"
"go.opencensus.io/plugin/runmetrics"
"go.opencensus.io/stats"
@@ -30,19 +28,14 @@ import (
"golang.org/x/xerrors"
"gopkg.in/cheggaaa/pb.v1"
- "github.com/filecoin-project/go-address"
- cborutil "github.com/filecoin-project/go-cbor-util"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-paramfetch"
- "github.com/filecoin-project/go-state-types/builtin"
- "github.com/filecoin-project/specs-actors/actors/builtin/miner"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/beacon/drand"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
- "github.com/filecoin-project/lotus/chain/gen/slashfilter"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@@ -128,6 +121,10 @@ var DaemonCmd = &cli.Command{
Name: "import-snapshot",
Usage: "import chain state from a given chain export file or url",
},
+ &cli.BoolFlag{
+ Name: "remove-existing-chain",
+ Usage: "remove existing chain and splitstore data on a snapshot-import",
+ },
&cli.BoolFlag{
Name: "halt-after-import",
Usage: "halt the process after importing chain from file",
@@ -169,19 +166,6 @@ var DaemonCmd = &cli.Command{
Name: "restore-config",
Usage: "config file to use when restoring from backup",
},
- &cli.BoolFlag{
- Name: "slash-consensus",
- Usage: "Report consensus fault",
- Value: false,
- },
- &cli.StringFlag{
- Name: "slasher-sender",
- Usage: "optionally specify the account to report consensus from",
- },
- &cli.StringFlag{
- Name: "slashdb-dir",
- Value: "slash watch db dir path",
- },
},
Action: func(cctx *cli.Context) error {
isLite := cctx.Bool("lite")
@@ -285,6 +269,26 @@ var DaemonCmd = &cli.Command{
}
}
+ if cctx.Bool("remove-existing-chain") {
+ lr, err := repo.NewFS(cctx.String("repo"))
+ if err != nil {
+ return xerrors.Errorf("error opening fs repo: %w", err)
+ }
+
+ exists, err := lr.Exists()
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return xerrors.Errorf("lotus repo doesn't exist")
+ }
+
+ err = removeExistingChain(cctx, lr)
+ if err != nil {
+ return err
+ }
+ }
+
chainfile := cctx.String("import-chain")
snapshot := cctx.String("import-snapshot")
if chainfile != "" || snapshot != "" {
@@ -402,14 +406,6 @@ var DaemonCmd = &cli.Command{
if err != nil {
return fmt.Errorf("failed to start json-rpc endpoint: %s", err)
}
- if cctx.IsSet("slash-consensus") && cctx.IsSet("slashdb-dir") {
- go func() {
- err := slashConsensus(api, cctx.String("slashdb-dir"), cctx.String("slasher-sender"))
- if err != nil {
- panic("slashConsensus error")
- }
- }()
- }
// Monitor for shutdown.
finishCh := node.MonitorShutdown(shutdownChan,
node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper},
@@ -565,13 +561,17 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool)
return err
}
- // TODO: We need to supply the actual beacon after v14
- stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
- if err != nil {
- return err
- }
-
if !snapshot {
+ shd, err := drand.BeaconScheduleFromDrandSchedule(build.DrandConfigSchedule(), gb.MinTimestamp(), nil)
+ if err != nil {
+ return xerrors.Errorf("failed to construct beacon schedule: %w", err)
+ }
+
+ stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex)
+ if err != nil {
+ return err
+ }
+
log.Infof("validating imported chain...")
if err := stm.ValidateChain(ctx, ts); err != nil {
return xerrors.Errorf("chain validation failed: %w", err)
@@ -604,121 +604,58 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool)
return nil
}
-func slashConsensus(a lapi.FullNode, p string, from string) error {
- ctx := context.Background()
- var fromAddr address.Address
-
- ds, err := levelds.NewDatastore(p, &levelds.Options{
- Compression: ldbopts.NoCompression,
- NoSync: false,
- Strict: ldbopts.StrictAll,
- ReadOnly: false,
- })
+func removeExistingChain(cctx *cli.Context, lr repo.Repo) error {
+ lockedRepo, err := lr.Lock(repo.FullNode)
if err != nil {
- return xerrors.Errorf("open leveldb: %w", err)
+ return xerrors.Errorf("error locking repo: %w", err)
}
- sf := slashfilter.New(ds)
- if from == "" {
- defaddr, err := a.WalletDefaultAddress(ctx)
- if err != nil {
- return err
- }
- fromAddr = defaddr
- } else {
- addr, err := address.NewFromString(from)
- if err != nil {
- return err
+ // Ensure that lockedRepo is closed when this function exits
+ defer func() {
+ if closeErr := lockedRepo.Close(); closeErr != nil {
+ log.Errorf("Error closing the lockedRepo: %v", closeErr)
}
+ }()
- fromAddr = addr
- }
-
- blocks, err := a.SyncIncomingBlocks(ctx)
+ cfg, err := lockedRepo.Config()
if err != nil {
- return xerrors.Errorf("sync incoming blocks failed: %w", err)
+ return xerrors.Errorf("error getting config: %w", err)
}
- for block := range blocks {
- log.Infof("deal with block: %d, %v, %s", block.Height, block.Miner, block.Cid())
- if otherBlock, extraBlock, err := slashFilterMinedBlock(ctx, sf, a, block); err != nil {
- if otherBlock == nil {
- continue
- }
- log.Errorf(" SLASH FILTER ERROR: %s", err)
- bh1, err := cborutil.Dump(otherBlock)
- if err != nil {
- log.Errorf("could not dump otherblock:%s, err:%s", otherBlock.Cid(), err)
- continue
- }
- bh2, err := cborutil.Dump(block)
- if err != nil {
- log.Errorf("could not dump block:%s, err:%s", block.Cid(), err)
- continue
- }
-
- params := miner.ReportConsensusFaultParams{
- BlockHeader1: bh1,
- BlockHeader2: bh2,
- }
- if extraBlock != nil {
- be, err := cborutil.Dump(extraBlock)
- if err != nil {
- log.Errorf("could not dump block:%s, err:%s", block.Cid(), err)
- continue
- }
- params.BlockHeaderExtra = be
- }
-
- enc, err := actors.SerializeParams(¶ms)
- if err != nil {
- log.Errorf("could not serialize declare faults parameters: %s", err)
- continue
- }
- message, err := a.MpoolPushMessage(ctx, &types.Message{
- To: block.Miner,
- From: fromAddr,
- Value: types.NewInt(0),
- Method: builtin.MethodsMiner.ReportConsensusFault,
- Params: enc,
- }, nil)
- if err != nil {
- log.Errorf("ReportConsensusFault to messagepool error:%w", err)
- continue
- }
- log.Infof("ReportConsensusFault message CID:%s", message.Cid())
+ fullNodeConfig, ok := cfg.(*config.FullNode)
+ if !ok {
+ return xerrors.Errorf("wrong config type: %T", cfg)
+ }
+ if fullNodeConfig.Chainstore.EnableSplitstore {
+ log.Info("removing splitstore directory...")
+ err = deleteSplitstoreDir(lockedRepo)
+ if err != nil {
+ return xerrors.Errorf("error removing splitstore directory: %w", err)
}
}
- return err
+
+ // Get the base repo path
+ repoPath := lockedRepo.Path()
+
+ // Construct the path to the chain directory
+ chainPath := filepath.Join(repoPath, "datastore", "chain")
+
+ log.Info("removing chain directory:", chainPath)
+
+ err = os.RemoveAll(chainPath)
+ if err != nil {
+ return xerrors.Errorf("error removing chain directory: %w", err)
+ }
+
+ log.Info("chain and splitstore data have been removed")
+ return nil
}
-func slashFilterMinedBlock(ctx context.Context, sf *slashfilter.SlashFilter, a lapi.FullNode, blockB *types.BlockHeader) (*types.BlockHeader, *types.BlockHeader, error) {
- blockC, err := a.ChainGetBlock(ctx, blockB.Parents[0])
+func deleteSplitstoreDir(lr repo.LockedRepo) error {
+ path, err := lr.SplitstorePath()
if err != nil {
- return nil, nil, xerrors.Errorf("chain get block error:%s", err)
- }
- otherCid, err := sf.MinedBlock(ctx, blockB, blockC.Height)
- if err != nil {
- return nil, nil, xerrors.Errorf("slash filter check block error:%s", err)
- }
- if otherCid != cid.Undef {
- otherHeader, err := a.ChainGetBlock(ctx, otherCid)
- return otherHeader, nil, xerrors.Errorf("chain get other block error:%s", err)
- }
- blockA, err := a.ChainGetBlock(ctx, otherCid)
-
- // (c) parent-grinding fault
- // Here extra is the "witness", a third block that shows the connection between A and B as
- // A's sibling and B's parent.
- // Specifically, since A is of lower height, it must be that B was mined omitting A from its tipset
- //
- // B
- // |
- // [A, C]
- if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height &&
- types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) {
- return blockA, blockC, xerrors.Errorf("chain get other block error:%s", err)
+ return xerrors.Errorf("error getting splitstore path: %w", err)
}
- return nil, nil, nil
+ return os.RemoveAll(path)
}
diff --git a/cmd/tvx/extract_message.go b/cmd/tvx/extract_message.go
index 8ff8a2b79..95711414b 100644
--- a/cmd/tvx/extract_message.go
+++ b/cmd/tvx/extract_message.go
@@ -15,7 +15,8 @@ import (
"github.com/filecoin-project/test-vectors/schema"
"github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/api/v0api"
+ lapi "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/chain/actors/builtin"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
@@ -207,7 +208,7 @@ func doExtractMessage(opts extractOpts) error {
// TODO sometimes this returns a nil receipt and no error ¯\_(ツ)_/¯
// ex: https://filfox.info/en/message/bafy2bzacebpxw3yiaxzy2bako62akig46x3imji7fewszen6fryiz6nymu2b2
// This code is lenient and skips receipt comparison in case of a nil receipt.
- rec, err := FullAPI.StateGetReceipt(ctx, mcid, execTs.Key())
+ rec, err := FullAPI.StateSearchMsg(ctx, execTs.Key(), mcid, api.LookbackNoLimit, false)
if err != nil {
return fmt.Errorf("failed to find receipt on chain: %w", err)
}
@@ -217,9 +218,9 @@ func doExtractMessage(opts extractOpts) error {
var receipt *schema.Receipt
if rec != nil {
receipt = &schema.Receipt{
- ExitCode: int64(rec.ExitCode),
- ReturnValue: rec.Return,
- GasUsed: rec.GasUsed,
+ ExitCode: int64(rec.Receipt.ExitCode),
+ ReturnValue: rec.Receipt.Return,
+ GasUsed: rec.Receipt.GasUsed,
}
reporter := new(conformance.LogReporter)
@@ -326,7 +327,7 @@ func doExtractMessage(opts extractOpts) error {
// resolveFromChain queries the chain for the provided message, using the block CID to
// speed up the query, if provided
-func resolveFromChain(ctx context.Context, api v0api.FullNode, mcid cid.Cid, block string) (msg *types.Message, execTs *types.TipSet, incTs *types.TipSet, err error) {
+func resolveFromChain(ctx context.Context, api lapi.FullNode, mcid cid.Cid, block string) (msg *types.Message, execTs *types.TipSet, incTs *types.TipSet, err error) {
// Extract the full message.
msg, err = api.ChainGetMessage(ctx, mcid)
if err != nil {
@@ -339,7 +340,7 @@ func resolveFromChain(ctx context.Context, api v0api.FullNode, mcid cid.Cid, blo
log.Printf("locating message in blockchain")
// Locate the message.
- msgInfo, err := api.StateSearchMsg(ctx, mcid)
+ msgInfo, err := api.StateSearchMsg(ctx, types.EmptyTSK, mcid, lapi.LookbackNoLimit, false)
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to locate message: %w", err)
}
@@ -384,7 +385,7 @@ func resolveFromChain(ctx context.Context, api v0api.FullNode, mcid cid.Cid, blo
// as the previous tipset. In the context of vector generation, the target
// tipset is the one where a message was executed, and the previous tipset is
// the one where the message was included.
-func fetchThisAndPrevTipset(ctx context.Context, api v0api.FullNode, target types.TipSetKey) (targetTs *types.TipSet, prevTs *types.TipSet, err error) {
+func fetchThisAndPrevTipset(ctx context.Context, api v1api.FullNode, target types.TipSetKey) (targetTs *types.TipSet, prevTs *types.TipSet, err error) {
// get the tipset on which this message was "executed" on.
// https://github.com/filecoin-project/lotus/issues/2847
targetTs, err = api.ChainGetTipSet(ctx, target)
diff --git a/cmd/tvx/main.go b/cmd/tvx/main.go
index b1541e4e1..5021dd64b 100644
--- a/cmd/tvx/main.go
+++ b/cmd/tvx/main.go
@@ -10,13 +10,13 @@ import (
"github.com/filecoin-project/go-jsonrpc"
- "github.com/filecoin-project/lotus/api/v0api"
+ "github.com/filecoin-project/lotus/api/v1api"
lcli "github.com/filecoin-project/lotus/cli"
)
// FullAPI is a JSON-RPC client targeting a full node. It's initialized in a
// cli.BeforeFunc.
-var FullAPI v0api.FullNode
+var FullAPI v1api.FullNode
// Closer is the closer for the JSON-RPC client, which must be called on
// cli.AfterFunc.
@@ -102,7 +102,7 @@ func initialize(c *cli.Context) error {
// Make the API client.
var err error
- if FullAPI, Closer, err = lcli.GetFullNodeAPI(c); err != nil {
+ if FullAPI, Closer, err = lcli.GetFullNodeAPIV1(c); err != nil {
err = fmt.Errorf("failed to locate Lotus node; err: %w", err)
}
return err
diff --git a/cmd/tvx/state.go b/cmd/tvx/state.go
index 120eddd6b..9674bf17e 100644
--- a/cmd/tvx/state.go
+++ b/cmd/tvx/state.go
@@ -14,7 +14,8 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/lotus/api/v0api"
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/api/v1api"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/types"
@@ -24,13 +25,13 @@ import (
// StateSurgeon is an object used to fetch and manipulate state.
type StateSurgeon struct {
ctx context.Context
- api v0api.FullNode
+ api v1api.FullNode
stores *Stores
}
// NewSurgeon returns a state surgeon, an object used to fetch and manipulate
// state.
-func NewSurgeon(ctx context.Context, api v0api.FullNode, stores *Stores) *StateSurgeon {
+func NewSurgeon(ctx context.Context, api v1api.FullNode, stores *Stores) *StateSurgeon {
return &StateSurgeon{
ctx: ctx,
api: api,
@@ -86,9 +87,9 @@ func (sg *StateSurgeon) GetMaskedStateTree(previousRoot cid.Cid, retain []addres
// GetAccessedActors identifies the actors that were accessed during the
// execution of a message.
-func (sg *StateSurgeon) GetAccessedActors(ctx context.Context, a v0api.FullNode, mid cid.Cid) ([]address.Address, error) {
+func (sg *StateSurgeon) GetAccessedActors(ctx context.Context, a v1api.FullNode, mid cid.Cid) ([]address.Address, error) {
log.Printf("calculating accessed actors during execution of message: %s", mid)
- msgInfo, err := a.StateSearchMsg(ctx, mid)
+ msgInfo, err := a.StateSearchMsg(ctx, types.EmptyTSK, mid, api.LookbackNoLimit, false)
if err != nil {
return nil, err
}
diff --git a/cmd/tvx/stores.go b/cmd/tvx/stores.go
index d4431a145..0ced44817 100644
--- a/cmd/tvx/stores.go
+++ b/cmd/tvx/stores.go
@@ -18,7 +18,7 @@ import (
format "github.com/ipfs/go-ipld-format"
"golang.org/x/xerrors"
- "github.com/filecoin-project/lotus/api/v0api"
+ "github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/actors/adt"
)
@@ -39,7 +39,7 @@ type Stores struct {
// NewProxyingStores is a set of Stores backed by a proxying Blockstore that
// proxies Get requests for unknown CIDs to a Filecoin node, via the
// ChainReadObj RPC.
-func NewProxyingStores(ctx context.Context, api v0api.FullNode) *Stores {
+func NewProxyingStores(ctx context.Context, api v1api.FullNode) *Stores {
ds := dssync.MutexWrap(ds.NewMapDatastore())
bs := &proxyingBlockstore{
ctx: ctx,
@@ -84,7 +84,7 @@ type TracingBlockstore interface {
// a Filecoin node via JSON-RPC.
type proxyingBlockstore struct {
ctx context.Context
- api v0api.FullNode
+ api v1api.FullNode
lk sync.Mutex
tracing bool
diff --git a/conformance/driver.go b/conformance/driver.go
index eb5973f72..3c62ca7b9 100644
--- a/conformance/driver.go
+++ b/conformance/driver.go
@@ -23,6 +23,7 @@ import (
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
+ "github.com/filecoin-project/lotus/chain/rand"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@@ -89,9 +90,9 @@ type ExecuteTipsetParams struct {
ParentEpoch abi.ChainEpoch
Tipset *schema.Tipset
ExecEpoch abi.ChainEpoch
- // Rand is an optional vm.Rand implementation to use. If nil, the driver
- // will use a vm.Rand that returns a fixed value for all calls.
- Rand vm.Rand
+ // Rand is an optional rand.Rand implementation to use. If nil, the driver
+ // will use a rand.Rand that returns a fixed value for all calls.
+ Rand rand.Rand
// BaseFee if not nil or zero, will override the basefee of the tipset.
BaseFee abi.TokenAmount
}
@@ -200,9 +201,9 @@ type ExecuteMessageParams struct {
BaseFee abi.TokenAmount
NetworkVersion network.Version
- // Rand is an optional vm.Rand implementation to use. If nil, the driver
- // will use a vm.Rand that returns a fixed value for all calls.
- Rand vm.Rand
+ // Rand is an optional rand.Rand implementation to use. If nil, the driver
+ // will use a rand.Rand that returns a fixed value for all calls.
+ Rand rand.Rand
// Lookback is the LookbackStateGetter; returns the state tree at a given epoch.
Lookback vm.LookbackStateGetter
diff --git a/conformance/rand_fixed.go b/conformance/rand_fixed.go
index d356b53d0..f35f05cd4 100644
--- a/conformance/rand_fixed.go
+++ b/conformance/rand_fixed.go
@@ -4,25 +4,24 @@ import (
"context"
"github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/crypto"
- "github.com/filecoin-project/lotus/chain/vm"
+ "github.com/filecoin-project/lotus/chain/rand"
)
type fixedRand struct{}
-var _ vm.Rand = (*fixedRand)(nil)
+var _ rand.Rand = (*fixedRand)(nil)
// NewFixedRand creates a test vm.Rand that always returns fixed bytes value
// of utf-8 string 'i_am_random_____i_am_random_____'.
-func NewFixedRand() vm.Rand {
+func NewFixedRand() rand.Rand {
return &fixedRand{}
}
-func (r *fixedRand) GetChainRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
- return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
+func (r *fixedRand) GetChainRandomness(_ context.Context, _ abi.ChainEpoch) ([32]byte, error) {
+ return *(*[32]byte)([]byte("i_am_random_____i_am_random_____")), nil
}
-func (r *fixedRand) GetBeaconRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) {
- return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes.
+func (r *fixedRand) GetBeaconRandomness(_ context.Context, _ abi.ChainEpoch) ([32]byte, error) {
+ return *(*[32]byte)([]byte("i_am_random_____i_am_random_____")), nil // 32 bytes.
}
diff --git a/conformance/rand_record.go b/conformance/rand_record.go
index 277c984a7..4dc30b28e 100644
--- a/conformance/rand_record.go
+++ b/conformance/rand_record.go
@@ -6,17 +6,16 @@ import (
"sync"
"github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/test-vectors/schema"
- "github.com/filecoin-project/lotus/api/v0api"
+ "github.com/filecoin-project/lotus/api/v1api"
+ "github.com/filecoin-project/lotus/chain/rand"
"github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/chain/vm"
)
type RecordingRand struct {
reporter Reporter
- api v0api.FullNode
+ api v1api.FullNode
// once guards the loading of the head tipset.
// can be removed when https://github.com/filecoin-project/lotus/issues/4223
@@ -27,12 +26,12 @@ type RecordingRand struct {
recorded schema.Randomness
}
-var _ vm.Rand = (*RecordingRand)(nil)
+var _ rand.Rand = (*RecordingRand)(nil)
// NewRecordingRand returns a vm.Rand implementation that proxies calls to a
// full Lotus node via JSON-RPC, and records matching rules and responses so
// they can later be embedded in test vectors.
-func NewRecordingRand(reporter Reporter, api v0api.FullNode) *RecordingRand {
+func NewRecordingRand(reporter Reporter, api v1api.FullNode) *RecordingRand {
return &RecordingRand{reporter: reporter, api: api}
}
@@ -44,22 +43,20 @@ func (r *RecordingRand) loadHead() {
r.head = head.Key()
}
-func (r *RecordingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (r *RecordingRand) GetChainRandomness(ctx context.Context, round abi.ChainEpoch) ([32]byte, error) {
r.once.Do(r.loadHead)
- // FullNode's v0 ChainGetRandomnessFromTickets handles whether we should be looking forward or back
- ret, err := r.api.ChainGetRandomnessFromTickets(ctx, r.head, pers, round, entropy)
+ // FullNode's v1 StateGetRandomnessDigestFromTickets handles whether we should be looking forward or back
+ ret, err := r.api.StateGetRandomnessDigestFromTickets(ctx, round, r.head)
if err != nil {
- return ret, err
+ return [32]byte{}, err
}
- r.reporter.Logf("fetched and recorded chain randomness for: dst=%d, epoch=%d, entropy=%x, result=%x", pers, round, entropy, ret)
+ r.reporter.Logf("fetched and recorded chain randomness for: epoch=%d, result=%x", round, ret)
match := schema.RandomnessMatch{
On: schema.RandomnessRule{
- Kind: schema.RandomnessChain,
- DomainSeparationTag: int64(pers),
- Epoch: int64(round),
- Entropy: entropy,
+ Kind: schema.RandomnessChain,
+ Epoch: int64(round),
},
Return: []byte(ret),
}
@@ -67,24 +64,22 @@ func (r *RecordingRand) GetChainRandomness(ctx context.Context, pers crypto.Doma
r.recorded = append(r.recorded, match)
r.lk.Unlock()
- return ret, err
+ return *(*[32]byte)(ret), err
}
-func (r *RecordingRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (r *RecordingRand) GetBeaconRandomness(ctx context.Context, round abi.ChainEpoch) ([32]byte, error) {
r.once.Do(r.loadHead)
- ret, err := r.api.StateGetRandomnessFromBeacon(ctx, pers, round, entropy, r.head)
+ ret, err := r.api.StateGetRandomnessDigestFromBeacon(ctx, round, r.head)
if err != nil {
- return ret, err
+ return [32]byte{}, err
}
- r.reporter.Logf("fetched and recorded beacon randomness for: dst=%d, epoch=%d, entropy=%x, result=%x", pers, round, entropy, ret)
+ r.reporter.Logf("fetched and recorded beacon randomness for: epoch=%d, result=%x", round, ret)
match := schema.RandomnessMatch{
On: schema.RandomnessRule{
- Kind: schema.RandomnessBeacon,
- DomainSeparationTag: int64(pers),
- Epoch: int64(round),
- Entropy: entropy,
+ Kind: schema.RandomnessBeacon,
+ Epoch: int64(round),
},
Return: []byte(ret),
}
@@ -92,7 +87,7 @@ func (r *RecordingRand) GetBeaconRandomness(ctx context.Context, pers crypto.Dom
r.recorded = append(r.recorded, match)
r.lk.Unlock()
- return ret, err
+ return *(*[32]byte)(ret), err
}
func (r *RecordingRand) Recorded() schema.Randomness {
diff --git a/conformance/rand_replay.go b/conformance/rand_replay.go
index ef19e41bb..6d78d813b 100644
--- a/conformance/rand_replay.go
+++ b/conformance/rand_replay.go
@@ -1,23 +1,21 @@
package conformance
import (
- "bytes"
"context"
"github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/test-vectors/schema"
- "github.com/filecoin-project/lotus/chain/vm"
+ "github.com/filecoin-project/lotus/chain/rand"
)
type ReplayingRand struct {
reporter Reporter
recorded schema.Randomness
- fallback vm.Rand
+ fallback rand.Rand
}
-var _ vm.Rand = (*ReplayingRand)(nil)
+var _ rand.Rand = (*ReplayingRand)(nil)
// NewReplayingRand replays recorded randomness when requested, falling back to
// fixed randomness if the value cannot be found; hence this is a safe
@@ -30,50 +28,44 @@ func NewReplayingRand(reporter Reporter, recorded schema.Randomness) *ReplayingR
}
}
-func (r *ReplayingRand) match(requested schema.RandomnessRule) ([]byte, bool) {
+func (r *ReplayingRand) match(requested schema.RandomnessRule) ([32]byte, bool) {
for _, other := range r.recorded {
if other.On.Kind == requested.Kind &&
- other.On.Epoch == requested.Epoch &&
- other.On.DomainSeparationTag == requested.DomainSeparationTag &&
- bytes.Equal(other.On.Entropy, requested.Entropy) {
- return other.Return, true
+ other.On.Epoch == requested.Epoch {
+ return *(*[32]byte)(other.Return), true
}
}
- return nil, false
+ return [32]byte{}, false
}
-func (r *ReplayingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (r *ReplayingRand) GetChainRandomness(ctx context.Context, round abi.ChainEpoch) ([32]byte, error) {
rule := schema.RandomnessRule{
- Kind: schema.RandomnessChain,
- DomainSeparationTag: int64(pers),
- Epoch: int64(round),
- Entropy: entropy,
+ Kind: schema.RandomnessChain,
+ Epoch: int64(round),
}
if ret, ok := r.match(rule); ok {
- r.reporter.Logf("returning saved chain randomness: dst=%d, epoch=%d, entropy=%x, result=%x", pers, round, entropy, ret)
+ r.reporter.Logf("returning saved chain randomness: epoch=%d, result=%x", round, ret)
return ret, nil
}
- r.reporter.Logf("returning fallback chain randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy)
+ r.reporter.Logf("returning fallback chain randomness: epoch=%d", round)
- return r.fallback.GetChainRandomness(ctx, pers, round, entropy)
+ return r.fallback.GetChainRandomness(ctx, round)
}
-func (r *ReplayingRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
+func (r *ReplayingRand) GetBeaconRandomness(ctx context.Context, round abi.ChainEpoch) ([32]byte, error) {
rule := schema.RandomnessRule{
- Kind: schema.RandomnessBeacon,
- DomainSeparationTag: int64(pers),
- Epoch: int64(round),
- Entropy: entropy,
+ Kind: schema.RandomnessBeacon,
+ Epoch: int64(round),
}
if ret, ok := r.match(rule); ok {
- r.reporter.Logf("returning saved beacon randomness: dst=%d, epoch=%d, entropy=%x, result=%x", pers, round, entropy, ret)
+ r.reporter.Logf("returning saved beacon randomness: epoch=%d, result=%x", round, ret)
return ret, nil
}
- r.reporter.Logf("returning fallback beacon randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy)
+ r.reporter.Logf("returning fallback beacon randomness: epoch=%d", round)
- return r.fallback.GetBeaconRandomness(ctx, pers, round, entropy)
+ return r.fallback.GetBeaconRandomness(ctx, round)
}
diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md
index 997778069..04c037bc5 100644
--- a/documentation/en/api-v0-methods-miner.md
+++ b/documentation/en/api-v0-methods-miner.md
@@ -475,7 +475,7 @@ Inputs:
],
"Bw==",
10101,
- 20
+ 21
]
```
diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md
index 2acc969d3..742f3de8e 100644
--- a/documentation/en/api-v0-methods.md
+++ b/documentation/en/api-v0-methods.md
@@ -4729,7 +4729,7 @@ Perms: read
Inputs:
```json
[
- 20
+ 21
]
```
@@ -4744,7 +4744,7 @@ Perms: read
Inputs:
```json
[
- 20
+ 21
]
```
@@ -4873,7 +4873,12 @@ Response:
"Value": "0",
"Method": 1,
"Params": "Ynl0ZSBhcnJheQ==",
- "ParamsCodec": 42
+ "ParamsCodec": 42,
+ "GasLimit": 42,
+ "ReadOnly": true,
+ "CodeCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
},
"MsgRct": {
"ExitCode": 0,
@@ -4897,7 +4902,12 @@ Response:
"Value": "0",
"Method": 1,
"Params": "Ynl0ZSBhcnJheQ==",
- "ParamsCodec": 42
+ "ParamsCodec": 42,
+ "GasLimit": 42,
+ "ReadOnly": true,
+ "CodeCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
},
"MsgRct": {
"ExitCode": 0,
@@ -5103,7 +5113,12 @@ Response:
"Value": "0",
"Method": 1,
"Params": "Ynl0ZSBhcnJheQ==",
- "ParamsCodec": 42
+ "ParamsCodec": 42,
+ "GasLimit": 42,
+ "ReadOnly": true,
+ "CodeCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
},
"MsgRct": {
"ExitCode": 0,
@@ -5127,7 +5142,12 @@ Response:
"Value": "0",
"Method": 1,
"Params": "Ynl0ZSBhcnJheQ==",
- "ParamsCodec": 42
+ "ParamsCodec": 42,
+ "GasLimit": 42,
+ "ReadOnly": true,
+ "CodeCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
},
"MsgRct": {
"ExitCode": 0,
@@ -5445,8 +5465,10 @@ Response:
"UpgradeSharkHeight": 10101,
"UpgradeHyggeHeight": 10101,
"UpgradeLightningHeight": 10101,
- "UpgradeThunderHeight": 10101
- }
+ "UpgradeThunderHeight": 10101,
+ "UpgradeWatermelonHeight": 10101
+ },
+ "Eip155ChainID": 123
}
```
@@ -6370,7 +6392,7 @@ Inputs:
]
```
-Response: `20`
+Response: `21`
### StateReadState
StateReadState returns the indicated actor's state.
@@ -6491,7 +6513,12 @@ Response:
"Value": "0",
"Method": 1,
"Params": "Ynl0ZSBhcnJheQ==",
- "ParamsCodec": 42
+ "ParamsCodec": 42,
+ "GasLimit": 42,
+ "ReadOnly": true,
+ "CodeCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
},
"MsgRct": {
"ExitCode": 0,
@@ -6515,7 +6542,12 @@ Response:
"Value": "0",
"Method": 1,
"Params": "Ynl0ZSBhcnJheQ==",
- "ParamsCodec": 42
+ "ParamsCodec": 42,
+ "GasLimit": 42,
+ "ReadOnly": true,
+ "CodeCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
},
"MsgRct": {
"ExitCode": 0,
diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md
index e3c97eecf..e2c249395 100644
--- a/documentation/en/api-v1-unstable-methods.md
+++ b/documentation/en/api-v1-unstable-methods.md
@@ -104,6 +104,8 @@
* [EthSendRawTransaction](#EthSendRawTransaction)
* [EthSubscribe](#EthSubscribe)
* [EthSyncing](#EthSyncing)
+ * [EthTraceBlock](#EthTraceBlock)
+ * [EthTraceReplayBlockTransactions](#EthTraceReplayBlockTransactions)
* [EthUninstallFilter](#EthUninstallFilter)
* [EthUnsubscribe](#EthUnsubscribe)
* [Filecoin](#Filecoin)
@@ -235,6 +237,8 @@
* [StateGetClaim](#StateGetClaim)
* [StateGetClaims](#StateGetClaims)
* [StateGetNetworkParams](#StateGetNetworkParams)
+ * [StateGetRandomnessDigestFromBeacon](#StateGetRandomnessDigestFromBeacon)
+ * [StateGetRandomnessDigestFromTickets](#StateGetRandomnessDigestFromTickets)
* [StateGetRandomnessFromBeacon](#StateGetRandomnessFromBeacon)
* [StateGetRandomnessFromTickets](#StateGetRandomnessFromTickets)
* [StateListActors](#StateListActors)
@@ -3081,6 +3085,99 @@ Inputs: `null`
Response: `false`
+### EthTraceBlock
+TraceAPI related methods
+
+Returns traces created at given block
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "string value"
+]
+```
+
+Response:
+```json
+[
+ {
+ "action": {
+ "callType": "string value",
+ "from": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "gas": "0x5",
+ "input": "0x07",
+ "value": "0x0"
+ },
+ "result": {
+ "gasUsed": "0x5",
+ "output": "0x07"
+ },
+ "subtraces": 123,
+ "traceAddress": [
+ 123
+ ],
+ "Type": "string value",
+ "blockHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
+ "blockNumber": 9,
+ "transactionHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
+ "transactionPosition": 123
+ }
+]
+```
+
+### EthTraceReplayBlockTransactions
+Replays all transactions in a block returning the requested traces for each transaction
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ "string value",
+ [
+ "string value"
+ ]
+]
+```
+
+Response:
+```json
+[
+ {
+ "output": "0x07",
+ "stateDiff": "string value",
+ "trace": [
+ {
+ "action": {
+ "callType": "string value",
+ "from": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
+ "gas": "0x5",
+ "input": "0x07",
+ "value": "0x0"
+ },
+ "result": {
+ "gasUsed": "0x5",
+ "output": "0x07"
+ },
+ "subtraces": 123,
+ "traceAddress": [
+ 123
+ ],
+ "Type": "string value"
+ }
+ ],
+ "transactionHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
+ "vmTrace": "string value"
+ }
+]
+```
+
### EthUninstallFilter
Uninstalls a filter with given id.
@@ -6166,7 +6263,7 @@ Perms: read
Inputs:
```json
[
- 20
+ 21
]
```
@@ -6181,7 +6278,7 @@ Perms: read
Inputs:
```json
[
- 20
+ 21
]
```
@@ -6310,7 +6407,12 @@ Response:
"Value": "0",
"Method": 1,
"Params": "Ynl0ZSBhcnJheQ==",
- "ParamsCodec": 42
+ "ParamsCodec": 42,
+ "GasLimit": 42,
+ "ReadOnly": true,
+ "CodeCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
},
"MsgRct": {
"ExitCode": 0,
@@ -6334,7 +6436,12 @@ Response:
"Value": "0",
"Method": 1,
"Params": "Ynl0ZSBhcnJheQ==",
- "ParamsCodec": 42
+ "ParamsCodec": 42,
+ "GasLimit": 42,
+ "ReadOnly": true,
+ "CodeCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
},
"MsgRct": {
"ExitCode": 0,
@@ -6540,7 +6647,12 @@ Response:
"Value": "0",
"Method": 1,
"Params": "Ynl0ZSBhcnJheQ==",
- "ParamsCodec": 42
+ "ParamsCodec": 42,
+ "GasLimit": 42,
+ "ReadOnly": true,
+ "CodeCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
},
"MsgRct": {
"ExitCode": 0,
@@ -6564,7 +6676,12 @@ Response:
"Value": "0",
"Method": 1,
"Params": "Ynl0ZSBhcnJheQ==",
- "ParamsCodec": 42
+ "ParamsCodec": 42,
+ "GasLimit": 42,
+ "ReadOnly": true,
+ "CodeCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
},
"MsgRct": {
"ExitCode": 0,
@@ -6957,11 +7074,59 @@ Response:
"UpgradeSharkHeight": 10101,
"UpgradeHyggeHeight": 10101,
"UpgradeLightningHeight": 10101,
- "UpgradeThunderHeight": 10101
- }
+ "UpgradeThunderHeight": 10101,
+ "UpgradeWatermelonHeight": 10101
+ },
+ "Eip155ChainID": 123
}
```
+### StateGetRandomnessDigestFromBeacon
+StateGetRandomnessDigestFromBeacon is used to sample the beacon for randomness.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ 10101,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"Bw=="`
+
+### StateGetRandomnessDigestFromTickets
+StateGetRandomnessDigestFromTickets is used to sample the chain for randomness.
+
+
+Perms: read
+
+Inputs:
+```json
+[
+ 10101,
+ [
+ {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ },
+ {
+ "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve"
+ }
+ ]
+]
+```
+
+Response: `"Bw=="`
+
### StateGetRandomnessFromBeacon
StateGetRandomnessFromBeacon is used to sample the beacon for randomness.
@@ -7890,7 +8055,7 @@ Inputs:
]
```
-Response: `20`
+Response: `21`
### StateReadState
StateReadState returns the indicated actor's state.
@@ -8011,7 +8176,12 @@ Response:
"Value": "0",
"Method": 1,
"Params": "Ynl0ZSBhcnJheQ==",
- "ParamsCodec": 42
+ "ParamsCodec": 42,
+ "GasLimit": 42,
+ "ReadOnly": true,
+ "CodeCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
},
"MsgRct": {
"ExitCode": 0,
@@ -8035,7 +8205,12 @@ Response:
"Value": "0",
"Method": 1,
"Params": "Ynl0ZSBhcnJheQ==",
- "ParamsCodec": 42
+ "ParamsCodec": 42,
+ "GasLimit": 42,
+ "ReadOnly": true,
+ "CodeCid": {
+ "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+ }
},
"MsgRct": {
"ExitCode": 0,
diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md
index ee8f45837..8406b07cc 100644
--- a/documentation/en/cli-lotus-miner.md
+++ b/documentation/en/cli-lotus-miner.md
@@ -7,7 +7,7 @@ USAGE:
lotus-miner [global options] command [command options] [arguments...]
VERSION:
- 1.23.3-dev
+ 1.23.5-dev
COMMANDS:
init Initialize a lotus miner repo
diff --git a/documentation/en/cli-lotus-worker.md b/documentation/en/cli-lotus-worker.md
index bdf992e58..66d0266c3 100644
--- a/documentation/en/cli-lotus-worker.md
+++ b/documentation/en/cli-lotus-worker.md
@@ -7,7 +7,7 @@ USAGE:
lotus-worker [global options] command [command options] [arguments...]
VERSION:
- 1.23.3-dev
+ 1.23.5-dev
COMMANDS:
run Start lotus worker
diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md
index fe63acbc6..e1dc0660b 100644
--- a/documentation/en/cli-lotus.md
+++ b/documentation/en/cli-lotus.md
@@ -7,7 +7,7 @@ USAGE:
lotus [global options] command [command options] [arguments...]
VERSION:
- 1.23.3-dev
+ 1.23.5-dev
COMMANDS:
daemon Start a lotus daemon process
@@ -65,6 +65,7 @@ OPTIONS:
--bootstrap (default: true)
--import-chain value on first run, load chain from given file or url and validate
--import-snapshot value import chain state from a given chain export file or url
+ --remove-existing-chain remove existing chain and splitstore data on a snapshot-import (default: false)
--halt-after-import halt the process after importing chain from file (default: false)
--lite start lotus in lite mode (default: false)
--pprof value specify name of file for writing cpu profile to
@@ -74,9 +75,6 @@ OPTIONS:
--api-max-req-size value maximum API request size accepted by the JSON RPC server (default: 0)
--restore value restore from backup file
--restore-config value config file to use when restoring from backup
- --slash-consensus Report consensus fault (default: false)
- --slasher-sender value optionally specify the account to report consensus from
- --slashdb-dir value (default: "slash watch db dir path")
--help, -h show help
```
diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml
index 8e99869a5..c37e40f74 100644
--- a/documentation/en/default-lotus-config.toml
+++ b/documentation/en/default-lotus-config.toml
@@ -399,3 +399,32 @@
#EnableMsgIndex = false
+[FaultReporter]
+ # EnableConsensusFaultReporter controls whether the node will monitor and
+ # report consensus faults. When enabled, the node will watch for malicious
+ # behaviors like double-mining and parent grinding, and submit reports to the
+ # network. This can earn reporter rewards, but is not guaranteed. Nodes should
+ # enable fault reporting with care, as it may increase resource usage, and may
+ # generate gas fees without earning rewards.
+ #
+ # type: bool
+ # env var: LOTUS_FAULTREPORTER_ENABLECONSENSUSFAULTREPORTER
+ #EnableConsensusFaultReporter = false
+
+ # ConsensusFaultReporterDataDir is the path where fault reporter state will be
+ # persisted. This directory should have adequate space and permissions for the
+ # node process.
+ #
+ # type: string
+ # env var: LOTUS_FAULTREPORTER_CONSENSUSFAULTREPORTERDATADIR
+ #ConsensusFaultReporterDataDir = ""
+
+ # ConsensusFaultReporterAddress is the wallet address used for submitting
+ # ReportConsensusFault messages. It will pay for gas fees, and receive any
+ # rewards. This address should have adequate funds to cover gas fees.
+ #
+ # type: string
+ # env var: LOTUS_FAULTREPORTER_CONSENSUSFAULTREPORTERADDRESS
+ #ConsensusFaultReporterAddress = ""
+
+
diff --git a/documentation/en/default-lotus-miner-config.toml b/documentation/en/default-lotus-miner-config.toml
index af409c5ad..5400c693d 100644
--- a/documentation/en/default-lotus-miner-config.toml
+++ b/documentation/en/default-lotus-miner-config.toml
@@ -589,12 +589,6 @@
# env var: LOTUS_SEALING_DISABLECOLLATERALFALLBACK
#DisableCollateralFallback = false
- # enable / disable precommit batching (takes effect after nv13)
- #
- # type: bool
- # env var: LOTUS_SEALING_BATCHPRECOMMITS
- #BatchPreCommits = true
-
# maximum precommit batch size - batches will be sent immediately above this size
#
# type: int
@@ -644,7 +638,8 @@
#CommitBatchSlack = "1h0m0s"
# network BaseFee below which to stop doing precommit batching, instead
- # sending precommit messages to the chain individually
+ # sending precommit messages to the chain individually. When the basefee is
+ # below this threshold, precommit messages will get sent out immediately.
#
# type: types.FIL
# env var: LOTUS_SEALING_BATCHPRECOMMITABOVEBASEFEE
diff --git a/documentation/en/default-lotus-provider-config.toml b/documentation/en/default-lotus-provider-config.toml
new file mode 100644
index 000000000..a92a37350
--- /dev/null
+++ b/documentation/en/default-lotus-provider-config.toml
@@ -0,0 +1,2 @@
+[config]
+status = "Coming Soon"
diff --git a/documentation/misc/RELEASE_ISSUE_TEMPLATE.md b/documentation/misc/RELEASE_ISSUE_TEMPLATE.md
index 9ae46fe57..116c615d3 100644
--- a/documentation/misc/RELEASE_ISSUE_TEMPLATE.md
+++ b/documentation/misc/RELEASE_ISSUE_TEMPLATE.md
@@ -2,28 +2,25 @@
# Lotus X.Y.Z Release
-
-## What will be in the release
-
+[//]: # (Open this issue as [WIP] Lotus vX.Y.Z)
+[//]: # (Apply the `tpm` label to it, and pin the issue on GitHub)
## 🚢 Estimated shipping date
-## 🔦 Highlights
-
-< See Changelog>
-
## ✅ Release Checklist
-**Note for whomever is owning the release:** please capture notes as comments in this issue for anything you noticed that could be improved for future releases. There is a *Post Release* step below for incorporating changes back into the [RELEASE_ISSUE_TEMPLATE](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md), and this is easier done by collecting notes from along the way rather than just thinking about it at the end.
+**Note for whoever is owning the release:** please capture notes as comments in this issue for anything you noticed that could be improved for future releases. There is a *Post Release* step below for incorporating changes back into the [RELEASE_ISSUE_TEMPLATE](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md), and this is easier done by collecting notes from along the way rather than just thinking about it at the end.
First steps:
-
+ - [ ] FFI: Fork a new branch (`release/lotus-vX.Y.Z`) from the filecoin-ffi `master` branch
+ - [ ] FFI: Tag the head of `release/lotus-vX.Y.Z` as `vX.Y.Z-pre1`
+ - [ ] Open and land a PR in lotus `master` that updates the FFI dependency to `vX.Y.Z-pre1` as cut in the previous step
- [ ] Fork a new branch (`release/vX.Y.Z`) from `master` and make any further release related changes to this branch. If any "non-trivial" changes get added to the release, uncheck all the checkboxes and return to this stage.
- [ ] Bump the version in `build/version.go` in the `master` branch to `vX.Y.(Z+1)-dev` (bump from feature release) or `vX.(Y+1).0-dev` (bump from mandatory release). Run make gen and make docsgen-cli before committing changes
-Prepping an RC:
+**Prepping an RC**:
- [ ] version string in `build/version.go` has been updated (in the `release/vX.Y.Z` branch)
- [ ] run `make gen && make docsgen-cli`
@@ -32,7 +29,7 @@ Prepping an RC:
- [ ] tag commit with `vX.Y.Z-rcN`
- [ ] cut a pre-release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true)
-Testing an RC:
+**Testing an RC**:
- [ ] **Stage 0 - Automated Testing**
- Automated Testing
@@ -69,7 +66,7 @@ Testing an RC:
- [ ] Update the [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) to the state that can be used as release note.
- [ ] Invite the wider community through (link to the release issue)
-- [ ] **Stage 4 - Stable Release**
+**Stable Release**
- [ ] Final preparation
- [ ] Verify that version string in [`version.go`](https://github.com/filecoin-project/lotus/blob/master/build/version.go) has been updated.
- [ ] Verify that codegen is up to date (`make gen && make docsgen-cli`)
@@ -79,7 +76,7 @@ Testing an RC:
- [ ] Cut the release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=false&target=releases).
-- [ ] **Post-Release**
+**Post-Release**
- [ ] Merge the `releases` branch back into `master`, ignoring the changes to `version.go` (keep the `-dev` version from master). Do NOT delete the `releases` branch when doing so!
- [ ] Update [RELEASE_ISSUE_TEMPLATE.md](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md) with any improvements determined from this latest release iteration.
- [ ] Create an issue using [RELEASE_ISSUE_TEMPLATE.md](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md) for the _next_ release.
diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi
index a458f638e..bf5edd551 160000
--- a/extern/filecoin-ffi
+++ b/extern/filecoin-ffi
@@ -1 +1 @@
-Subproject commit a458f638e3c8603c9b5a9ed9847c3af4597e46d4
+Subproject commit bf5edd551d23901fa565aac4ce94433afe0c278e
diff --git a/extern/test-vectors b/extern/test-vectors
index 28b0c45ea..195bc0659 160000
--- a/extern/test-vectors
+++ b/extern/test-vectors
@@ -1 +1 @@
-Subproject commit 28b0c45eab4c302864af0aeaaff813625cfafe97
+Subproject commit 195bc065973ec35826621823964a5c3cbe5fa56d
diff --git a/gateway/node.go b/gateway/node.go
index 811cc79d3..367e645c1 100644
--- a/gateway/node.go
+++ b/gateway/node.go
@@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
+ verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/network"
@@ -77,6 +78,11 @@ type TargetAPI interface {
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
+ StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+ StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
+ StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error)
+ StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error)
+ StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error)
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
@@ -138,6 +144,8 @@ type TargetAPI interface {
EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error)
EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error)
Web3ClientVersion(ctx context.Context) (string, error)
+ EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error)
+ EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error)
}
var _ TargetAPI = *new(api.FullNode) // gateway depends on latest
diff --git a/gateway/proxy_eth.go b/gateway/proxy_eth.go
index a07ead16c..e6d433a17 100644
--- a/gateway/proxy_eth.go
+++ b/gateway/proxy_eth.go
@@ -16,18 +16,11 @@ import (
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/chain/events/filter"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes"
)
-func (gw *Node) Web3ClientVersion(ctx context.Context) (string, error) {
- if err := gw.limit(ctx, basicRateLimitTokens); err != nil {
- return "", err
- }
-
- return gw.target.Web3ClientVersion(ctx)
-}
-
func (gw *Node) EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error) {
// gateway provides public API, so it can't hold user accounts
return []ethtypes.EthAddress{}, nil
@@ -427,7 +420,7 @@ func (gw *Node) EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID
ft.lk.Unlock()
if !ok {
- return nil, nil
+ return nil, filter.ErrFilterNotFound
}
return gw.target.EthGetFilterChanges(ctx, id)
@@ -581,6 +574,38 @@ func (gw *Node) EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionI
return ok, nil
}
+func (gw *Node) Web3ClientVersion(ctx context.Context) (string, error) {
+ if err := gw.limit(ctx, basicRateLimitTokens); err != nil {
+ return "", err
+ }
+
+ return gw.target.Web3ClientVersion(ctx)
+}
+
+func (gw *Node) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) {
+ if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+ return nil, err
+ }
+
+ if err := gw.checkBlkParam(ctx, blkNum, 0); err != nil {
+ return nil, err
+ }
+
+ return gw.target.EthTraceBlock(ctx, blkNum)
+}
+
+func (gw *Node) EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+ return nil, err
+ }
+
+ if err := gw.checkBlkParam(ctx, blkNum, 0); err != nil {
+ return nil, err
+ }
+
+ return gw.target.EthTraceReplayBlockTransactions(ctx, blkNum, traceTypes)
+}
+
var EthMaxFiltersPerConn = 16 // todo make this configurable
func addUserFilterLimited(ctx context.Context, cb func() (ethtypes.EthFilterID, error)) (ethtypes.EthFilterID, error) {
diff --git a/gateway/proxy_fil.go b/gateway/proxy_fil.go
index abd5371fe..eb8a354ed 100644
--- a/gateway/proxy_fil.go
+++ b/gateway/proxy_fil.go
@@ -10,6 +10,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
+ verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/network"
@@ -579,3 +580,53 @@ func (gw *Node) WalletBalance(ctx context.Context, k address.Address) (types.Big
}
return gw.target.WalletBalance(ctx, k)
}
+
+func (gw *Node) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) {
+ if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+ return nil, err
+ }
+ if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+ return nil, err
+ }
+ return gw.target.StateGetAllocationForPendingDeal(ctx, dealId, tsk)
+}
+
+func (gw *Node) StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) {
+ if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+ return nil, err
+ }
+ if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+ return nil, err
+ }
+ return gw.target.StateGetAllocation(ctx, clientAddr, allocationId, tsk)
+}
+
+func (gw *Node) StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
+ if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+ return nil, err
+ }
+ if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+ return nil, err
+ }
+ return gw.target.StateGetAllocations(ctx, clientAddr, tsk)
+}
+
+func (gw *Node) StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) {
+ if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+ return nil, err
+ }
+ if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+ return nil, err
+ }
+ return gw.target.StateGetClaim(ctx, providerAddr, claimId, tsk)
+}
+
+func (gw *Node) StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
+ if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+ return nil, err
+ }
+ if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+ return nil, err
+ }
+ return gw.target.StateGetClaims(ctx, providerAddr, tsk)
+}
diff --git a/gen/inlinegen-data.json b/gen/inlinegen-data.json
index 5208f3912..cf72d24fa 100644
--- a/gen/inlinegen-data.json
+++ b/gen/inlinegen-data.json
@@ -1,7 +1,7 @@
{
- "actorVersions": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
- "latestActorsVersion": 11,
+ "actorVersions": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
+ "latestActorsVersion": 12,
- "networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
- "latestNetworkVersion": 20
+ "networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
+ "latestNetworkVersion": 21
}
diff --git a/go.mod b/go.mod
index ac0950eed..4c654c16e 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/filecoin-project/lotus
-go 1.19
+go 1.20
retract v1.14.0 // Accidentally force-pushed tag, use v1.14.1+ instead.
@@ -33,7 +33,7 @@ require (
github.com/filecoin-project/dagstore v0.5.2
github.com/filecoin-project/filecoin-ffi v0.30.4-0.20220519234331-bfd1f5f9fe38
github.com/filecoin-project/go-address v1.1.0
- github.com/filecoin-project/go-amt-ipld/v4 v4.0.0
+ github.com/filecoin-project/go-amt-ipld/v4 v4.2.0
github.com/filecoin-project/go-bitfield v0.2.4
github.com/filecoin-project/go-cbor-util v0.0.1
github.com/filecoin-project/go-commp-utils v0.1.3
@@ -45,7 +45,7 @@ require (
github.com/filecoin-project/go-jsonrpc v0.3.1
github.com/filecoin-project/go-padreader v0.0.1
github.com/filecoin-project/go-paramfetch v0.0.4
- github.com/filecoin-project/go-state-types v0.11.1
+ github.com/filecoin-project/go-state-types v0.12.1
github.com/filecoin-project/go-statemachine v1.0.3
github.com/filecoin-project/go-statestore v0.2.0
github.com/filecoin-project/go-storedcounter v0.1.0
@@ -59,7 +59,7 @@ require (
github.com/filecoin-project/specs-actors/v6 v6.0.2
github.com/filecoin-project/specs-actors/v7 v7.0.1
github.com/filecoin-project/specs-actors/v8 v8.0.1
- github.com/filecoin-project/test-vectors/schema v0.0.5
+ github.com/filecoin-project/test-vectors/schema v0.0.7
github.com/gbrlsnchs/jwt/v3 v3.0.1
github.com/gdamore/tcell/v2 v2.2.0
github.com/georgysavva/scany/v2 v2.0.0
@@ -75,7 +75,8 @@ require (
github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e
github.com/hashicorp/go-hclog v1.3.0
github.com/hashicorp/go-multierror v1.1.1
- github.com/hashicorp/golang-lru/v2 v2.0.2
+ github.com/hashicorp/golang-lru/arc/v2 v2.0.5
+ github.com/hashicorp/golang-lru/v2 v2.0.5
github.com/hashicorp/raft v1.3.10
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea
github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94
@@ -92,6 +93,7 @@ require (
github.com/ipfs/go-fs-lock v0.0.7
github.com/ipfs/go-graphsync v0.14.6
github.com/ipfs/go-ipfs-blocksutil v0.0.1
+ github.com/ipfs/go-ipfs-exchange-offline v0.3.0
github.com/ipfs/go-ipld-cbor v0.0.6
github.com/ipfs/go-ipld-format v0.5.0
github.com/ipfs/go-log/v2 v2.5.1
@@ -109,7 +111,7 @@ require (
github.com/kelseyhightower/envconfig v1.4.0
github.com/koalacxr/quantile v0.0.1
github.com/libp2p/go-buffer-pool v0.1.0
- github.com/libp2p/go-libp2p v0.27.6
+ github.com/libp2p/go-libp2p v0.30.0
github.com/libp2p/go-libp2p-consensus v0.0.1
github.com/libp2p/go-libp2p-gorpc v0.5.0
github.com/libp2p/go-libp2p-kad-dht v0.24.0
@@ -124,9 +126,10 @@ require (
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1
github.com/mitchellh/go-homedir v1.1.0
github.com/multiformats/go-base32 v0.1.0
- github.com/multiformats/go-multiaddr v0.9.0
+ github.com/multiformats/go-multiaddr v0.11.0
github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/multiformats/go-multibase v0.2.0
+ github.com/multiformats/go-multicodec v0.9.0
github.com/multiformats/go-multihash v0.2.3
github.com/multiformats/go-varint v0.0.7
github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333
@@ -153,17 +156,17 @@ require (
go.opentelemetry.io/otel/exporters/jaeger v1.14.0
go.opentelemetry.io/otel/sdk v1.16.0
go.uber.org/atomic v1.11.0
- go.uber.org/fx v1.19.3
+ go.uber.org/fx v1.20.0
go.uber.org/multierr v1.11.0
- go.uber.org/zap v1.24.0
- golang.org/x/crypto v0.10.0
- golang.org/x/exp v0.0.0-20230321023759-10a507213a29
- golang.org/x/net v0.10.0
- golang.org/x/sync v0.2.0
- golang.org/x/sys v0.10.0
- golang.org/x/term v0.9.0
+ go.uber.org/zap v1.25.0
+ golang.org/x/crypto v0.12.0
+ golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
+ golang.org/x/net v0.14.0
+ golang.org/x/sync v0.3.0
+ golang.org/x/sys v0.11.0
+ golang.org/x/term v0.11.0
golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9
- golang.org/x/tools v0.9.1
+ golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2
gopkg.in/cheggaaa/pb.v1 v1.0.28
gotest.tools v2.2.0+incompatible
@@ -232,7 +235,7 @@ require (
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gopacket v1.1.19 // indirect
- github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect
+ github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect
github.com/hannahhoward/cbor-gen-for v0.0.0-20230214144701-5d17c9d5243c // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
@@ -270,7 +273,7 @@ require (
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kilic/bls12-381 v0.1.0 // indirect
- github.com/klauspost/compress v1.16.5 // indirect
+ github.com/klauspost/compress v1.16.7 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/koron/go-ssdp v0.0.4 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
@@ -279,18 +282,18 @@ require (
github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect
github.com/libp2p/go-libp2p-gostream v0.6.0 // indirect
github.com/libp2p/go-libp2p-kbucket v0.6.1 // indirect
- github.com/libp2p/go-nat v0.1.0 // indirect
+ github.com/libp2p/go-nat v0.2.0 // indirect
github.com/libp2p/go-netroute v0.2.1 // indirect
- github.com/libp2p/go-reuseport v0.2.0 // indirect
- github.com/libp2p/go-yamux/v4 v4.0.0 // indirect
+ github.com/libp2p/go-reuseport v0.4.0 // indirect
+ github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
github.com/lucasb-eyer/go-colorful v1.0.3 // indirect
github.com/magefile/mage v1.9.0 // indirect
- github.com/mailru/easyjson v0.7.6 // indirect
+ github.com/mailru/easyjson v0.7.7 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.10 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
- github.com/miekg/dns v1.1.54 // indirect
+ github.com/miekg/dns v1.1.55 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
@@ -299,12 +302,11 @@ require (
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
- github.com/multiformats/go-multicodec v0.9.0 // indirect
github.com/multiformats/go-multistream v0.4.1 // indirect
github.com/nikkolasg/hexjson v0.1.0 // indirect
github.com/nkovacs/streamquote v1.0.0 // indirect
- github.com/onsi/ginkgo/v2 v2.9.7 // indirect
- github.com/opencontainers/runtime-spec v1.0.2 // indirect
+ github.com/onsi/ginkgo/v2 v2.11.0 // indirect
+ github.com/opencontainers/runtime-spec v1.1.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect
@@ -315,9 +317,8 @@ require (
github.com/prometheus/procfs v0.10.1 // indirect
github.com/prometheus/statsd_exporter v0.22.7 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
- github.com/quic-go/qtls-go1-19 v0.3.2 // indirect
- github.com/quic-go/qtls-go1-20 v0.2.2 // indirect
- github.com/quic-go/quic-go v0.33.0 // indirect
+ github.com/quic-go/qtls-go1-20 v0.3.3 // indirect
+ github.com/quic-go/quic-go v0.37.6 // indirect
github.com/quic-go/webtransport-go v0.5.3 // indirect
github.com/rivo/uniseg v0.1.0 // indirect
github.com/rs/cors v1.7.0 // indirect
@@ -345,8 +346,8 @@ require (
go.uber.org/dig v1.17.0 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
golang.org/x/arch v0.3.0 // indirect
- golang.org/x/mod v0.10.0 // indirect
- golang.org/x/text v0.10.0 // indirect
+ golang.org/x/mod v0.12.0 // indirect
+ golang.org/x/text v0.12.0 // indirect
gonum.org/v1/gonum v0.13.0 // indirect
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
google.golang.org/grpc v1.55.0 // indirect
@@ -355,7 +356,6 @@ require (
gopkg.in/yaml.v3 v3.0.1 // indirect
howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
lukechampine.com/blake3 v1.2.1 // indirect
- nhooyr.io/websocket v1.8.7 // indirect
)
replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi
diff --git a/go.sum b/go.sum
index 5fb7cda2d..9beece7f2 100644
--- a/go.sum
+++ b/go.sum
@@ -295,8 +295,9 @@ github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoC
github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o=
github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE=
github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo=
-github.com/filecoin-project/go-amt-ipld/v4 v4.0.0 h1:XM81BJ4/6h3FV0WfFjh74cIDIgqMbJsMBLM0fIuLUUk=
github.com/filecoin-project/go-amt-ipld/v4 v4.0.0/go.mod h1:gF053YQ4BIpzTNDoEwHZas7U3oAwncDVGvOHyY8oDpE=
+github.com/filecoin-project/go-amt-ipld/v4 v4.2.0 h1:DQTXQwMXxaetd+lhZGODjt5qC1WYT7tMAlYrWqI/fwI=
+github.com/filecoin-project/go-amt-ipld/v4 v4.2.0/go.mod h1:0eDVF7pROvxrsxvLJx+SJZXqRaXXcEPUcgb/rG0zGU4=
github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=
github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk=
@@ -343,8 +344,9 @@ github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go
github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q=
github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q=
-github.com/filecoin-project/go-state-types v0.11.1 h1:GDtCN9V18bYVwXDZe+vJXc6Ck+qY9OUaQqpoVlp1FAk=
github.com/filecoin-project/go-state-types v0.11.1/go.mod h1:SyNPwTsU7I22gL2r0OAPcImvLoTVfgRwdK/Y5rR1zz8=
+github.com/filecoin-project/go-state-types v0.12.1 h1:/1ip/jXIP4QzWd3hlaQ7RGp1DHKKYG3+NOhd/r08UJY=
+github.com/filecoin-project/go-state-types v0.12.1/go.mod h1:KOBGyvCalT8uHBS7KSKOVbjsilD90bBZHgLAqrzz6gU=
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
github.com/filecoin-project/go-statemachine v1.0.3 h1:N07o6alys+V1tNoSTi4WuuoeNC4erS/6jE74+NsgQuk=
github.com/filecoin-project/go-statemachine v1.0.3/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54=
@@ -375,8 +377,8 @@ github.com/filecoin-project/specs-actors/v7 v7.0.1 h1:w72xCxijK7xs1qzmJiw+WYJaVt
github.com/filecoin-project/specs-actors/v7 v7.0.1/go.mod h1:tPLEYXoXhcpyLh69Ccq91SOuLXsPWjHiY27CzawjUEk=
github.com/filecoin-project/specs-actors/v8 v8.0.1 h1:4u0tIRJeT5G7F05lwLRIsDnsrN+bJ5Ixj6h49Q7uE2Y=
github.com/filecoin-project/specs-actors/v8 v8.0.1/go.mod h1:UYIPg65iPWoFw5NEftREdJwv9b/5yaLKdCgTvNI/2FA=
-github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
-github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
+github.com/filecoin-project/test-vectors/schema v0.0.7 h1:hhrcxLnQR2Oe6fjk63hZXG1fWQGyxgCVXOOlAlR/D9A=
+github.com/filecoin-project/test-vectors/schema v0.0.7/go.mod h1:WqdmeJrz0V37wp7DucRR/bvrScZffqaCyIk9G0BGw1o=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
@@ -407,7 +409,6 @@ github.com/gin-contrib/pprof v1.4.0 h1:XxiBSf5jWZ5i16lNOPbMTVdgHBdhfGRD5PZ1LWazz
github.com/gin-contrib/pprof v1.4.0/go.mod h1:RrehPJasUVBPK6yTUwOl8/NP6i0vbUgmxtis+Z5KE90=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
-github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
@@ -456,15 +457,12 @@ github.com/go-openapi/swag v0.19.11 h1:RFTu/dlFySpyVvJDfp/7674JY4SDglYWKztbiIGFp
github.com/go-openapi/swag v0.19.11/go.mod h1:Uc0gKkdR+ojzsEpjh39QChyu92vPgIr72POcgHMAgSY=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
-github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
-github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
-github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js=
github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
@@ -477,12 +475,6 @@ github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSC
github.com/gobuffalo/logger v1.0.0/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs=
github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q=
github.com/gobuffalo/packr/v2 v2.6.0/go.mod h1:sgEE1xNZ6G0FNN5xn9pevVu4nywaxHvgup67xisti08=
-github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0=
-github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
-github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=
-github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
-github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=
-github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
@@ -578,8 +570,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9SN1TigNLn9ZnF3W4SYRKq2gAHs=
-github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
+github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo=
+github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -657,8 +649,10 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU=
-github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw=
+github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU=
+github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4=
+github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
@@ -785,6 +779,7 @@ github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSO
github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0=
github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY=
github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA=
+github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s=
github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4=
github.com/ipfs/go-ipfs-files v0.3.0 h1:fallckyc5PYjuMEitPNrjRfpwl7YFt69heCOUhsbGxQ=
@@ -960,13 +955,13 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
-github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
-github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
+github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/koalacxr/quantile v0.0.1 h1:wAW+SQ286Erny9wOjVww96t8ws+x5Zj6AKHDULUK+o0=
@@ -990,7 +985,6 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
@@ -1019,8 +1013,8 @@ github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xS
github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw=
github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o=
github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0=
-github.com/libp2p/go-libp2p v0.27.6 h1:KmGU5kskCaaerm53heqzfGOlrW2z8icZ+fnyqgrZs38=
-github.com/libp2p/go-libp2p v0.27.6/go.mod h1:oMfQGTb9CHnrOuSM6yMmyK2lXz3qIhnkn2+oK3B1Y2g=
+github.com/libp2p/go-libp2p v0.30.0 h1:9EZwFtJPFBcs/yJTnP90TpN1hgrT/EsFfM+OZuwV87U=
+github.com/libp2p/go-libp2p v0.30.0/go.mod h1:nr2g5V7lfftwgiJ78/HrID+pwvayLyqKCEirT2Y3Byg=
github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s=
github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w=
github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8=
@@ -1157,8 +1151,8 @@ github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbx
github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI=
github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo=
github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU=
-github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg=
-github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM=
+github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
+github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk=
github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk=
github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A=
@@ -1172,8 +1166,8 @@ github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO
github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA=
github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ=
-github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560=
-github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k=
+github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
+github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs=
github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM=
github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw=
@@ -1201,8 +1195,8 @@ github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/h
github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ=
-github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ=
-github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
+github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ=
+github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8=
@@ -1218,8 +1212,9 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs=
github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I=
@@ -1258,8 +1253,8 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N
github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
-github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
+github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
+github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
@@ -1316,8 +1311,8 @@ github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u
github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI=
github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc=
github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0=
-github.com/multiformats/go-multiaddr v0.9.0 h1:3h4V1LHIk5w4hJHekMKWALPXErDfz/sggzwC/NcqbDQ=
-github.com/multiformats/go-multiaddr v0.9.0/go.mod h1:mI67Lb1EeTOYb8GQfL/7wpIZwc46ElrvzhYnoJOmTT0=
+github.com/multiformats/go-multiaddr v0.11.0 h1:XqGyJ8ufbCE0HmTDwx2kPdsrQ36AGPZNZX6s6xfJH10=
+github.com/multiformats/go-multiaddr v0.11.0/go.mod h1:gWUm0QLR4thQ6+ZF6SXUw8YjtwQSPapICM+NmCkxHSM=
github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q=
github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0=
@@ -1395,20 +1390,21 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo/v2 v2.9.7 h1:06xGQy5www2oN160RtEZoTvnP2sPhEfePYmCDc2szss=
-github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
+github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
+github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU=
+github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 h1:CznVS40zms0Dj5he4ERo+fRPtO0qxUk8lA8Xu3ddet0=
github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333/go.mod h1:Ag6rSXkHIckQmjFBCweJEEt1mrTPBv8b9W4aU/NQWfI=
-github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
+github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w=
@@ -1510,12 +1506,10 @@ github.com/puzpuzpuz/xsync/v2 v2.4.0 h1:5sXAMHrtx1bg9nbRZTOn8T4MkWe5V+o8yKRH02Ez
github.com/puzpuzpuz/xsync/v2 v2.4.0/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU=
github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
-github.com/quic-go/qtls-go1-19 v0.3.2 h1:tFxjCFcTQzK+oMxG6Zcvp4Dq8dx4yD3dDiIiyc86Z5U=
-github.com/quic-go/qtls-go1-19 v0.3.2/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI=
-github.com/quic-go/qtls-go1-20 v0.2.2 h1:WLOPx6OY/hxtTxKV1Zrq20FtXtDEkeY00CGQm8GEa3E=
-github.com/quic-go/qtls-go1-20 v0.2.2/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
-github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0=
-github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA=
+github.com/quic-go/qtls-go1-20 v0.3.3 h1:17/glZSLI9P9fDAeyCHBFSWSqJcwx1byhLwP5eUIDCM=
+github.com/quic-go/qtls-go1-20 v0.3.3/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k=
+github.com/quic-go/quic-go v0.37.6 h1:2IIUmQzT5YNxAiaPGjs++Z4hGOtIR0q79uS5qE9ccfY=
+github.com/quic-go/quic-go v0.37.6/go.mod h1:YsbH1r4mSHPJcLF4k4zruUkLBqctEMBDR6VPvcYjIsU=
github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU=
github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU=
github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y=
@@ -1657,10 +1651,8 @@ github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg=
github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
-github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
@@ -1799,8 +1791,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI=
go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU=
-go.uber.org/fx v1.19.3 h1:YqMRE4+2IepTYCMOvXqQpRa+QAVdiSTnsHU4XNWBceA=
-go.uber.org/fx v1.19.3/go.mod h1:w2HrQg26ql9fLK7hlBiZ6JsRUKV+Lj/atT1KCjT8YhM=
+go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ=
+go.uber.org/fx v1.20.0/go.mod h1:qCUj0btiR3/JnanEr1TYEePfSw6o/4qYJscgvzQ5Ub0=
go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
@@ -1819,8 +1811,8 @@ go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
-go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
-go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
+go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c=
+go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
@@ -1868,8 +1860,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
-golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
-golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
+golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
+golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1883,8 +1875,8 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc=
golang.org/x/exp v0.0.0-20210714144626-1041f73d31d8/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc=
-golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
-golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
+golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1910,8 +1902,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
-golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1973,8 +1965,8 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1998,8 +1990,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
-golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -2096,6 +2088,7 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -2104,16 +2097,16 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
-golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28=
-golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
+golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
+golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2123,8 +2116,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
-golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -2191,8 +2184,8 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
-golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E=
+golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -2356,8 +2349,6 @@ lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R
lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
-nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g=
-nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
diff --git a/itests/api_test.go b/itests/api_test.go
index ff303df3e..c87012cfe 100644
--- a/itests/api_test.go
+++ b/itests/api_test.go
@@ -28,12 +28,17 @@ func TestAPI(t *testing.T) {
//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
+ ts := apiSuite{}
+ t.Run("testMiningReal", ts.testMiningReal)
+ ts.opts = append(ts.opts, kit.ThroughRPC())
+ t.Run("testMiningReal", ts.testMiningReal)
+
//stm: @CHAIN_STATE_MINER_INFO_001
t.Run("direct", func(t *testing.T) {
- runAPITest(t)
+ runAPITest(t, kit.MockProofs())
})
t.Run("rpc", func(t *testing.T) {
- runAPITest(t, kit.ThroughRPC())
+ runAPITest(t, kit.MockProofs(), kit.ThroughRPC())
})
}
@@ -49,7 +54,6 @@ func runAPITest(t *testing.T, opts ...interface{}) {
t.Run("id", ts.testID)
t.Run("testConnectTwo", ts.testConnectTwo)
t.Run("testMining", ts.testMining)
- t.Run("testMiningReal", ts.testMiningReal)
t.Run("testSlowNotify", ts.testSlowNotify)
t.Run("testSearchMsg", ts.testSearchMsg)
t.Run("testOutOfGasError", ts.testOutOfGasError)
diff --git a/itests/batch_deal_test.go b/itests/batch_deal_test.go
index 68b276a0c..21db9f08d 100644
--- a/itests/batch_deal_test.go
+++ b/itests/batch_deal_test.go
@@ -61,7 +61,6 @@ func TestBatchDealInput(t *testing.T) {
sc.MaxSealingSectorsForDeals = 3
sc.AlwaysKeepUnsealedCopy = true
sc.WaitDealsDelay = time.Hour
- sc.BatchPreCommits = false
sc.AggregateCommits = false
return sc, nil
diff --git a/itests/ccupgrade_test.go b/itests/ccupgrade_test.go
deleted file mode 100644
index 030e115f8..000000000
--- a/itests/ccupgrade_test.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// stm: #integration
-package itests
-
-import (
- "context"
- "fmt"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/network"
-
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/itests/kit"
-)
-
-func TestCCUpgrade(t *testing.T) {
- //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
- //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
- //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
- //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
-
- //stm: @CHAIN_STATE_MINER_GET_INFO_001
- //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
-
- //stm: @MINER_SECTOR_LIST_001
- kit.QuietMiningLogs()
-
- n := runTestCCUpgrade(t)
-
- t.Run("post", func(t *testing.T) {
- ctx := context.Background()
- ts, err := n.ChainHead(ctx)
- require.NoError(t, err)
- start := ts.Height()
- // wait for a full proving period
- t.Log("waiting for chain")
-
- n.WaitTillChain(ctx, func(ts *types.TipSet) bool {
- if ts.Height() > start+abi.ChainEpoch(2880) {
- return true
- }
- return false
- })
- })
-}
-
-func runTestCCUpgrade(t *testing.T) *kit.TestFullNode {
- ctx := context.Background()
- blockTime := 1 * time.Millisecond
-
- client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15), kit.ThroughRPC())
- ens.InterconnectAll().BeginMiningMustPost(blockTime)
-
- maddr, err := miner.ActorAddress(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner)
- fmt.Printf("CCUpgrade: %d\n", CCUpgrade)
-
- miner.PledgeSectors(ctx, 1, 0, nil)
- sl, err := miner.SectorsListNonGenesis(ctx)
- require.NoError(t, err)
- require.Len(t, sl, 1, "expected 1 sector")
- require.Equal(t, CCUpgrade, sl[0], "unexpected sector number")
-
- si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK)
- require.NoError(t, err)
- require.NotNil(t, si)
- require.Less(t, 50000, int(si.Expiration))
- require.True(t, si.ReplacedSectorAge == 0)
-
- client.WaitForSectorActive(ctx, t, CCUpgrade, maddr)
-
- //stm: @SECTOR_CC_UPGRADE_001
- err = miner.SectorMarkForUpgrade(ctx, sl[0], true)
- require.NoError(t, err)
-
- sl, err = miner.SectorsListNonGenesis(ctx)
- require.NoError(t, err)
- require.Len(t, sl, 1, "expected 1 sector")
-
- dh := kit.NewDealHarness(t, client, miner, miner)
- deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{
- Rseed: 6,
- SuspendUntilCryptoeconStable: true,
- })
- outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
- kit.AssertFilesEqual(t, inPath, outPath)
-
- status, err := miner.SectorsStatus(ctx, CCUpgrade, true)
- require.NoError(t, err)
- assert.Equal(t, 1, len(status.Deals))
-
- miner.WaitSectorsProving(ctx, map[abi.SectorNumber]struct{}{
- CCUpgrade: {},
- })
-
- siUpdate, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK)
- require.NoError(t, err)
- require.NotNil(t, siUpdate)
- require.True(t, siUpdate.SectorKeyCID != nil)
- require.True(t, siUpdate.Activation > si.Activation)
-
- return client
-}
-
-func TestCCUpgradeAndPoSt(t *testing.T) {
- kit.QuietMiningLogs()
- t.Run("upgrade and then post", func(t *testing.T) {
- ctx := context.Background()
- n := runTestCCUpgrade(t)
- ts, err := n.ChainHead(ctx)
- require.NoError(t, err)
- start := ts.Height()
- // wait for a full proving period
- t.Log("waiting for chain")
-
- n.WaitTillChain(ctx, func(ts *types.TipSet) bool {
- if ts.Height() > start+abi.ChainEpoch(2880) {
- return true
- }
- return false
- })
- })
-}
diff --git a/itests/deadlines_test.go b/itests/deadlines_test.go
index 472e66abc..fb28f4509 100644
--- a/itests/deadlines_test.go
+++ b/itests/deadlines_test.go
@@ -18,7 +18,6 @@ import (
"github.com/filecoin-project/go-state-types/builtin"
minertypes "github.com/filecoin-project/go-state-types/builtin/v8/miner"
"github.com/filecoin-project/go-state-types/exitcode"
- "github.com/filecoin-project/go-state-types/network"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
"github.com/filecoin-project/lotus/api"
@@ -34,28 +33,6 @@ import (
)
// TestDeadlineToggling:
-// * spins up a v3 network (miner A)
-// * creates an inactive miner (miner B)
-// * creates another miner, pledges a sector, waits for power (miner C)
-//
-// * goes through v4 upgrade
-// * goes through PP
-// * creates minerD, minerE
-// * makes sure that miner B/D are inactive, A/C still are
-// * pledges sectors on miner B/D
-// * precommits a sector on minerE
-// * disables post on miner C
-// * goes through PP 0.5PP
-// * asserts that minerE is active
-// * goes through rest of PP (1.5)
-// * asserts that miner C loses power
-// * asserts that miner B/D is active and has power
-// * asserts that minerE is inactive
-// * disables post on miner B
-// * terminates sectors on miner D
-// * goes through another PP
-// * asserts that miner B loses power
-// * asserts that miner D loses power, is inactive
func TestDeadlineToggling(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
@@ -71,7 +48,6 @@ func TestDeadlineToggling(t *testing.T) {
const sectorsC, sectorsD, sectorsB = 10, 9, 8
var (
- upgradeH abi.ChainEpoch = 4000
provingPeriod abi.ChainEpoch = 2880
blocktime = 2 * time.Millisecond
)
@@ -81,14 +57,14 @@ func TestDeadlineToggling(t *testing.T) {
var (
client kit.TestFullNode
- minerA kit.TestMiner
- minerB kit.TestMiner
- minerC kit.TestMiner
- minerD kit.TestMiner
- minerE kit.TestMiner
+ minerA kit.TestMiner // A has some genesis sector, just keeps power
+ minerB kit.TestMiner // B pledges some sector, later fails some posts but stays alive
+ minerC kit.TestMiner // C pledges sectors, gains power, and later stops its PoSTs, but stays alive
+ minerD kit.TestMiner // D pledges sectors and later terminates them, losing all power, eventually deactivates cron
+ minerE kit.TestMiner // E pre-commits a sector but doesn't advance beyond that, cron should become inactive
)
opts := []kit.NodeOpt{kit.WithAllSubsystems()}
- ens := kit.NewEnsemble(t, kit.MockProofs(), kit.TurboUpgradeAt(upgradeH)).
+ ens := kit.NewEnsemble(t, kit.MockProofs()).
FullNode(&client, opts...).
Miner(&minerA, &client, opts...).
Start().
@@ -116,6 +92,8 @@ func TestDeadlineToggling(t *testing.T) {
ssz, err := minerC.ActorSectorSize(ctx, maddrC)
require.NoError(t, err)
+ targetHeight := abi.ChainEpoch(0)
+
// pledge sectors on C, go through a PP, check for power
{
minerC.PledgeSectors(ctx, sectorsC, 0, nil)
@@ -127,11 +105,13 @@ func TestDeadlineToggling(t *testing.T) {
t.Log("Running one proving period (miner C)")
t.Logf("End for head.Height > %d", di.PeriodStart+di.WPoStProvingPeriod*2)
+ targetHeight = di.PeriodStart + provingPeriod*2
+
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
- if head.Height() > di.PeriodStart+provingPeriod*2 {
+ if head.Height() > targetHeight {
t.Logf("Now head.Height = %d", head.Height())
break
}
@@ -148,18 +128,6 @@ func TestDeadlineToggling(t *testing.T) {
require.Equal(t, p.MinerPower.RawBytePower, expectedPower)
}
- // go through upgrade + PP
- for {
- head, err := client.ChainHead(ctx)
- require.NoError(t, err)
-
- if head.Height() > upgradeH+provingPeriod {
- t.Logf("Now head.Height = %d", head.Height())
- break
- }
- build.Clock.Sleep(blocktime)
- }
-
checkMiner := func(ma address.Address, power abi.StoragePower, active bool, tsk types.TipSetKey) {
//stm: @CHAIN_STATE_MINER_POWER_001
p, err := client.StateMinerPower(ctx, ma, tsk)
@@ -181,18 +149,6 @@ func TestDeadlineToggling(t *testing.T) {
require.Equal(t, active, act)
}
- // check that just after the upgrade minerB was still active
- {
- uts, err := client.ChainGetTipSetByHeight(ctx, upgradeH+2, types.EmptyTSK)
- require.NoError(t, err)
- checkMiner(maddrB, types.NewInt(0), true, uts.Key())
- }
-
- //stm: @CHAIN_STATE_NETWORK_VERSION_001
- nv, err := client.StateNetworkVersion(ctx, types.EmptyTSK)
- require.NoError(t, err)
- require.GreaterOrEqual(t, nv, network.Version12)
-
ens.Miner(&minerD, &client, opts...).
Miner(&minerE, &client, opts...).
Start()
@@ -254,12 +210,14 @@ func TestDeadlineToggling(t *testing.T) {
require.Equal(t, exitcode.Ok, r.Receipt.ExitCode)
}
+ targetHeight = targetHeight + (provingPeriod / 2)
+
// go through 0.5 PP
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
- if head.Height() > upgradeH+provingPeriod+(provingPeriod/2) {
+ if head.Height() > targetHeight {
t.Logf("Now head.Height = %d", head.Height())
break
}
@@ -268,12 +226,14 @@ func TestDeadlineToggling(t *testing.T) {
checkMiner(maddrE, types.NewInt(0), true, types.EmptyTSK)
+ targetHeight = targetHeight + (provingPeriod/2)*5
+
// go through rest of the PP
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
- if head.Height() > upgradeH+(provingPeriod*3) {
+ if head.Height() > targetHeight {
t.Logf("Now head.Height = %d", head.Height())
break
}
@@ -285,7 +245,12 @@ func TestDeadlineToggling(t *testing.T) {
checkMiner(maddrC, types.NewInt(0), true, types.EmptyTSK)
checkMiner(maddrB, types.NewInt(uint64(ssz)*sectorsB), true, types.EmptyTSK)
checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, types.EmptyTSK)
- checkMiner(maddrE, types.NewInt(0), false, types.EmptyTSK)
+
+ // Note: in the older version of this test `active` would be set to false
+ // this is now true because the time to commit a precommit a sector has
+ // increased to 30 days. We could keep the original assert and increase the
+ // wait above to 30 days, but that makes the test take 14 minutes to run..
+ checkMiner(maddrE, types.NewInt(0), true, types.EmptyTSK)
// disable post on minerB
minerB.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail()
@@ -344,12 +309,14 @@ func TestDeadlineToggling(t *testing.T) {
require.True(t, p.MinerPower.RawBytePower.IsZero())
}
+ targetHeight = targetHeight + provingPeriod*2
+
// go through another PP
for {
head, err := client.ChainHead(ctx)
require.NoError(t, err)
- if head.Height() > upgradeH+(provingPeriod*5) {
+ if head.Height() > targetHeight {
t.Logf("Now head.Height = %d", head.Height())
break
}
diff --git a/itests/deals_test.go b/itests/deals_test.go
index e8296ea87..a6953d07e 100644
--- a/itests/deals_test.go
+++ b/itests/deals_test.go
@@ -22,7 +22,7 @@ func TestDealsWithSealingAndRPC(t *testing.T) {
kit.QuietMiningLogs()
client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.WithAllSubsystems()) // no mock proofs.
- ens.InterconnectAll().BeginMining(250 * time.Millisecond)
+ ens.InterconnectAll().BeginMiningMustPost(250 * time.Millisecond)
dh := kit.NewDealHarness(t, client, miner, miner)
t.Run("stdretrieval", func(t *testing.T) {
diff --git a/itests/eth_transactions_test.go b/itests/eth_transactions_test.go
index 8d836573d..b39632795 100644
--- a/itests/eth_transactions_test.go
+++ b/itests/eth_transactions_test.go
@@ -310,13 +310,23 @@ func TestGetBlockByNumber(t *testing.T) {
afterNullHeight := hc[0].Val.Height()
+ nullHeight := afterNullHeight - 1
+ for nullHeight > 0 {
+ ts, err := client.ChainGetTipSetByHeight(ctx, nullHeight, types.EmptyTSK)
+ require.NoError(t, err)
+ if ts.Height() == nullHeight {
+ nullHeight--
+ } else {
+ break
+ }
+ }
+
// Fail when trying to fetch a null round.
- _, err = client.EthGetBlockByNumber(ctx, (ethtypes.EthUint64(afterNullHeight - 1)).Hex(), true)
+ _, err = client.EthGetBlockByNumber(ctx, (ethtypes.EthUint64(nullHeight)).Hex(), true)
require.Error(t, err)
// Fetch balance on a null round; should not fail and should return previous balance.
- // Should be lower than original balance.
- bal, err := client.EthGetBalance(ctx, ethAddr, ethtypes.NewEthBlockNumberOrHashFromNumber(ethtypes.EthUint64(afterNullHeight-1)))
+ bal, err := client.EthGetBalance(ctx, ethAddr, ethtypes.NewEthBlockNumberOrHashFromNumber(ethtypes.EthUint64(nullHeight)))
require.NoError(t, err)
require.NotEqual(t, big.Zero(), bal)
require.Equal(t, types.FromFil(10).Int, bal.Int)
diff --git a/itests/kit/deals.go b/itests/kit/deals.go
index 84e74124b..eb6b58667 100644
--- a/itests/kit/deals.go
+++ b/itests/kit/deals.go
@@ -87,6 +87,15 @@ func NewDealHarness(t *testing.T, client *TestFullNode, main *TestMiner, market
//
// TODO: convert input parameters to struct, and add size as an input param.
func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealParams) (deal *cid.Cid, res *api.ImportRes, path string) {
+ deal, res, path = dh.StartRandomDeal(ctx, params)
+
+ fmt.Printf("WAIT DEAL SEALEDS START\n")
+ dh.WaitDealSealed(ctx, deal, false, false, nil)
+ fmt.Printf("WAIT DEAL SEALEDS END\n")
+ return deal, res, path
+}
+
+func (dh *DealHarness) StartRandomDeal(ctx context.Context, params MakeFullDealParams) (deal *cid.Cid, res *api.ImportRes, path string) {
if params.UseCARFileForStorageDeal {
res, _, path = dh.client.ClientImportCARFile(ctx, params.Rseed, 200)
} else {
@@ -107,11 +116,6 @@ func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealPa
dp.FastRetrieval = params.FastRet
deal = dh.StartDeal(ctx, dp)
- // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
- time.Sleep(time.Second)
- fmt.Printf("WAIT DEAL SEALEDS START\n")
- dh.WaitDealSealed(ctx, deal, false, false, nil)
- fmt.Printf("WAIT DEAL SEALEDS END\n")
return deal, res, path
}
diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go
index 04a02cf61..1b5fef501 100644
--- a/itests/kit/ensemble.go
+++ b/itests/kit/ensemble.go
@@ -171,6 +171,8 @@ func NewEnsemble(t *testing.T, opts ...EnsembleOpt) *Ensemble {
require.NoError(t, build.UseNetworkBundle("testing"))
}
+ build.EquivocationDelaySecs = 0
+
return n
}
diff --git a/itests/kit/ensemble_opts_nv.go b/itests/kit/ensemble_opts_nv.go
index a30ed0e32..d5bb1930e 100644
--- a/itests/kit/ensemble_opts_nv.go
+++ b/itests/kit/ensemble_opts_nv.go
@@ -23,20 +23,6 @@ func GenesisNetworkVersion(nv network.Version) EnsembleOpt {
})
}
-func SDRUpgradeAt(calico, persian abi.ChainEpoch) EnsembleOpt {
- return UpgradeSchedule(stmgr.Upgrade{
- Network: network.Version6,
- Height: -1,
- }, stmgr.Upgrade{
- Network: network.Version7,
- Height: calico,
- Migration: filcns.UpgradeCalico,
- }, stmgr.Upgrade{
- Network: network.Version8,
- Height: persian,
- })
-}
-
func LatestActorsAt(upgradeHeight abi.ChainEpoch) EnsembleOpt {
/* inline-gen template
return UpgradeSchedule(stmgr.Upgrade{
@@ -49,23 +35,12 @@ func LatestActorsAt(upgradeHeight abi.ChainEpoch) EnsembleOpt {
})
/* inline-gen start */
return UpgradeSchedule(stmgr.Upgrade{
- Network: network.Version19,
+ Network: network.Version20,
Height: -1,
}, stmgr.Upgrade{
- Network: network.Version20,
+ Network: network.Version21,
Height: upgradeHeight,
- Migration: filcns.UpgradeActorsV11,
+ Migration: filcns.UpgradeActorsV12,
})
/* inline-gen end */
}
-
-func TurboUpgradeAt(upgradeHeight abi.ChainEpoch) EnsembleOpt {
- return UpgradeSchedule(stmgr.Upgrade{
- Network: network.Version11,
- Height: -1,
- }, stmgr.Upgrade{
- Network: network.Version12,
- Height: upgradeHeight,
- Migration: filcns.UpgradeActorsV4,
- })
-}
diff --git a/itests/kit/log.go b/itests/kit/log.go
index 0da9adfeb..0c66427f9 100644
--- a/itests/kit/log.go
+++ b/itests/kit/log.go
@@ -21,6 +21,7 @@ func QuietMiningLogs() {
_ = logging.SetLogLevel("pubsub", "ERROR")
_ = logging.SetLogLevel("gen", "ERROR")
_ = logging.SetLogLevel("rpc", "ERROR")
+ _ = logging.SetLogLevel("consensus-common", "ERROR")
_ = logging.SetLogLevel("dht/RtRefreshManager", "ERROR")
}
diff --git a/itests/kit/node_opts.go b/itests/kit/node_opts.go
index 5d418c5be..6469c0a30 100644
--- a/itests/kit/node_opts.go
+++ b/itests/kit/node_opts.go
@@ -197,7 +197,7 @@ func OwnerAddr(wk *key.Key) NodeOpt {
// the node.
func ConstructorOpts(extra ...node.Option) NodeOpt {
return func(opts *nodeOpts) error {
- opts.extraNodeOpts = extra
+ opts.extraNodeOpts = append(opts.extraNodeOpts, extra...)
return nil
}
}
@@ -290,6 +290,13 @@ func SplitstoreMessges() NodeOpt {
})
}
+func SplitstoreDisable() NodeOpt {
+ return WithCfgOpt(func(cfg *config.FullNode) error {
+ cfg.Chainstore.EnableSplitstore = false
+ return nil
+ })
+}
+
func WithEthRPC() NodeOpt {
return WithCfgOpt(func(cfg *config.FullNode) error {
cfg.Fevm.EnableEthRPC = true
diff --git a/itests/msgindex_test.go b/itests/msgindex_test.go
index cb5fd85c9..807ab3c03 100644
--- a/itests/msgindex_test.go
+++ b/itests/msgindex_test.go
@@ -93,7 +93,7 @@ func testSearchMsgWithIndex(t *testing.T, makeMsgIndex func(cs *store.ChainStore
// copy of apiSuite.testSearchMsgWith; needs to be copied or else CI is angry, tests are built individually there
ctx := context.Background()
- full, _, ens := kit.EnsembleMinimal(t, kit.ConstructorOpts(node.Override(new(index.MsgIndex), makeMsgIndex)))
+ full, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts(node.Override(new(index.MsgIndex), makeMsgIndex)))
senderAddr, err := full.WalletDefaultAddress(ctx)
require.NoError(t, err)
diff --git a/itests/remove_verifreg_datacap_test.go b/itests/remove_verifreg_datacap_test.go
index 3fd241748..ac88574d2 100644
--- a/itests/remove_verifreg_datacap_test.go
+++ b/itests/remove_verifreg_datacap_test.go
@@ -275,7 +275,7 @@ func TestNoRemoveDatacapFromVerifreg(t *testing.T) {
Params: params,
Value: big.Zero(),
}, types.EmptyTSK)
- require.Error(t, err)
+ require.NoError(t, err)
require.False(t, callResult.MsgRct.ExitCode.IsSuccess())
verifregDatacapAfter, err := clientApi.StateVerifiedClientStatus(ctx, builtin.VerifiedRegistryActorAddr, types.EmptyTSK)
diff --git a/itests/sdr_upgrade_test.go b/itests/sdr_upgrade_test.go
deleted file mode 100644
index d92d4edc9..000000000
--- a/itests/sdr_upgrade_test.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// stm: #integration
-package itests
-
-import (
- "context"
- "sort"
- "sync/atomic"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/network"
-
- "github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/itests/kit"
- bminer "github.com/filecoin-project/lotus/miner"
-)
-
-func TestSDRUpgrade(t *testing.T) {
- //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
- //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
- //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
- //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
-
- //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
- //stm: @CHAIN_STATE_NETWORK_VERSION_001
-
- //stm: @MINER_SECTOR_LIST_001
- kit.QuietMiningLogs()
-
- // oldDelay := policy.GetPreCommitChallengeDelay()
- // policy.SetPreCommitChallengeDelay(5)
- // t.Cleanup(func() {
- // policy.SetPreCommitChallengeDelay(oldDelay)
- // })
-
- blocktime := 50 * time.Millisecond
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- client, miner, ens := kit.EnsembleMinimal(t,
- kit.MockProofs(),
- kit.SDRUpgradeAt(500, 1000),
- )
- ens.InterconnectAll()
-
- build.Clock.Sleep(time.Second)
-
- pledge := make(chan struct{})
- mine := int64(1)
- done := make(chan struct{})
- go func() {
- defer close(done)
- round := 0
- for atomic.LoadInt64(&mine) != 0 {
- build.Clock.Sleep(blocktime)
- if err := miner.MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
-
- }}); err != nil {
- t.Error(err)
- }
-
- // 3 sealing rounds: before, during after.
- if round >= 3 {
- continue
- }
-
- head, err := client.ChainHead(ctx)
- assert.NoError(t, err)
-
- // rounds happen every 100 blocks, with a 50 block offset.
- if head.Height() >= abi.ChainEpoch(round*500+50) {
- round++
- pledge <- struct{}{}
-
- ver, err := client.StateNetworkVersion(ctx, head.Key())
- assert.NoError(t, err)
- switch round {
- case 1:
- assert.Equal(t, network.Version6, ver)
- case 2:
- assert.Equal(t, network.Version7, ver)
- case 3:
- assert.Equal(t, network.Version8, ver)
- }
- }
-
- }
- }()
-
- // before.
- miner.PledgeSectors(ctx, 9, 0, pledge)
-
- s, err := miner.SectorsListNonGenesis(ctx)
- require.NoError(t, err)
- sort.Slice(s, func(i, j int) bool {
- return s[i] < s[j]
- })
-
- for i, id := range s {
- info, err := miner.SectorsStatus(ctx, id, true)
- require.NoError(t, err)
- expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1
- if i >= 3 {
- // after
- expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1
- }
- assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id)
- }
-
- atomic.StoreInt64(&mine, 0)
- <-done
-}
diff --git a/itests/sector_make_cc_avail_test.go b/itests/sector_make_cc_avail_test.go
deleted file mode 100644
index 524b3c70f..000000000
--- a/itests/sector_make_cc_avail_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package itests
-
-import (
- "context"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/network"
-
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/itests/kit"
- "github.com/filecoin-project/lotus/node/config"
- sealing "github.com/filecoin-project/lotus/storage/pipeline"
-)
-
-func TestMakeAvailable(t *testing.T) {
- kit.QuietMiningLogs()
-
- ctx := context.Background()
- blockTime := 1 * time.Millisecond
-
- client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15), kit.ThroughRPC(), kit.MutateSealingConfig(func(sc *config.SealingConfig) {
- sc.MakeCCSectorsAvailable = true
- }))
- ens.InterconnectAll().BeginMiningMustPost(blockTime)
-
- maddr, err := miner.ActorAddress(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner)
-
- miner.PledgeSectors(ctx, 1, 0, nil)
- sl, err := miner.SectorsListNonGenesis(ctx)
- require.NoError(t, err)
- require.Len(t, sl, 1, "expected 1 sector")
- require.Equal(t, CCUpgrade, sl[0], "unexpected sector number")
- {
- si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK)
- require.NoError(t, err)
- require.NotNil(t, si)
- require.Less(t, 50000, int(si.Expiration))
- }
- client.WaitForSectorActive(ctx, t, CCUpgrade, maddr)
-
- sl, err = miner.SectorsListNonGenesis(ctx)
- require.NoError(t, err)
- require.Len(t, sl, 1, "expected 1 sector")
-
- status, err := miner.SectorsStatus(ctx, CCUpgrade, true)
- require.NoError(t, err)
- assert.Equal(t, api.SectorState(sealing.Available), status.State)
-
- dh := kit.NewDealHarness(t, client, miner, miner)
- deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{
- Rseed: 6,
- SuspendUntilCryptoeconStable: true,
- })
- outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
- kit.AssertFilesEqual(t, inPath, outPath)
-
- sl, err = miner.SectorsListNonGenesis(ctx)
- require.NoError(t, err)
- require.Len(t, sl, 1, "expected 1 sector")
-
- status, err = miner.SectorsStatus(ctx, CCUpgrade, true)
- require.NoError(t, err)
- assert.Equal(t, 1, len(status.Deals))
- miner.WaitSectorsProving(ctx, map[abi.SectorNumber]struct{}{
- CCUpgrade: {},
- })
-}
diff --git a/itests/sector_miner_collateral_test.go b/itests/sector_miner_collateral_test.go
index 8d7abacee..579b4e535 100644
--- a/itests/sector_miner_collateral_test.go
+++ b/itests/sector_miner_collateral_test.go
@@ -51,7 +51,6 @@ func TestMinerBalanceCollateral(t *testing.T) {
sc.AlwaysKeepUnsealedCopy = true
sc.WaitDealsDelay = time.Hour
- sc.BatchPreCommits = batching
sc.AggregateCommits = batching
sc.PreCommitBatchWait = time.Hour
diff --git a/itests/sector_pledge_test.go b/itests/sector_pledge_test.go
index 2ac1298d0..a2e74ef72 100644
--- a/itests/sector_pledge_test.go
+++ b/itests/sector_pledge_test.go
@@ -12,7 +12,6 @@ import (
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/network"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
"github.com/filecoin-project/lotus/api"
@@ -196,49 +195,3 @@ func TestPledgeMaxBatching(t *testing.T) {
t.Run("Force max prove commit aggregate size", runTest)
}
-
-func TestPledgeBeforeNv13(t *testing.T) {
- //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
- //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
- //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
- //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
-
- //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
- blocktime := 50 * time.Millisecond
-
- runTest := func(t *testing.T, nSectors int) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(),
- kit.GenesisNetworkVersion(network.Version12))
- ens.InterconnectAll().BeginMining(blocktime)
-
- client.WaitTillChain(ctx, kit.HeightAtLeast(10))
-
- toCheck := miner.StartPledge(ctx, nSectors, 0, nil)
-
- for len(toCheck) > 0 {
- states := map[api.SectorState]int{}
-
- for n := range toCheck {
- st, err := miner.SectorsStatus(ctx, n, false)
- require.NoError(t, err)
- states[st.State]++
- if st.State == api.SectorState(sealing.Proving) {
- delete(toCheck, n)
- }
- if strings.Contains(string(st.State), "Fail") {
- t.Fatal("sector in a failed state", st.State)
- }
- }
-
- build.Clock.Sleep(100 * time.Millisecond)
- fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
- }
- }
-
- t.Run("100-before-nv13", func(t *testing.T) {
- runTest(t, 100)
- })
-}
diff --git a/itests/sector_prefer_no_upgrade_test.go b/itests/sector_prefer_no_upgrade_test.go
deleted file mode 100644
index 96f07f9e4..000000000
--- a/itests/sector_prefer_no_upgrade_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package itests
-
-import (
- "context"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/network"
-
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/itests/kit"
- "github.com/filecoin-project/lotus/node/config"
- sealing "github.com/filecoin-project/lotus/storage/pipeline"
-)
-
-func TestPreferNoUpgrade(t *testing.T) {
- kit.QuietMiningLogs()
-
- ctx := context.Background()
- blockTime := 1 * time.Millisecond
-
- client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15), kit.ThroughRPC(), kit.MutateSealingConfig(func(sc *config.SealingConfig) {
- sc.PreferNewSectorsForDeals = true
- }))
- ens.InterconnectAll().BeginMiningMustPost(blockTime)
-
- maddr, err := miner.ActorAddress(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner)
- Sealed := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1)
-
- {
- miner.PledgeSectors(ctx, 1, 0, nil)
- sl, err := miner.SectorsListNonGenesis(ctx)
- require.NoError(t, err)
- require.Len(t, sl, 1, "expected 1 sector")
- require.Equal(t, CCUpgrade, sl[0], "unexpected sector number")
- {
- si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK)
- require.NoError(t, err)
- require.NotNil(t, si)
- require.Less(t, 50000, int(si.Expiration))
- }
- client.WaitForSectorActive(ctx, t, CCUpgrade, maddr)
-
- err = miner.SectorMarkForUpgrade(ctx, sl[0], true)
- require.NoError(t, err)
-
- sl, err = miner.SectorsListNonGenesis(ctx)
- require.NoError(t, err)
- require.Len(t, sl, 1, "expected 1 sector")
- }
-
- {
- dh := kit.NewDealHarness(t, client, miner, miner)
- deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{
- Rseed: 6,
- SuspendUntilCryptoeconStable: true,
- })
- outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
- kit.AssertFilesEqual(t, inPath, outPath)
- }
-
- sl, err := miner.SectorsListNonGenesis(ctx)
- require.NoError(t, err)
- require.Len(t, sl, 2, "expected 2 sectors")
-
- {
- status, err := miner.SectorsStatus(ctx, CCUpgrade, true)
- require.NoError(t, err)
- assert.Equal(t, api.SectorState(sealing.Available), status.State)
- }
-
- {
- status, err := miner.SectorsStatus(ctx, Sealed, true)
- require.NoError(t, err)
- assert.Equal(t, 1, len(status.Deals))
- miner.WaitSectorsProving(ctx, map[abi.SectorNumber]struct{}{
- Sealed: {},
- })
- }
-}
diff --git a/itests/sector_revert_available_test.go b/itests/sector_revert_available_test.go
deleted file mode 100644
index 41a46024f..000000000
--- a/itests/sector_revert_available_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package itests
-
-import (
- "context"
- "fmt"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/filecoin-project/go-state-types/abi"
- "github.com/filecoin-project/go-state-types/network"
-
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/chain/types"
- "github.com/filecoin-project/lotus/itests/kit"
- sealing "github.com/filecoin-project/lotus/storage/pipeline"
-)
-
-func TestAbortUpgradeAvailable(t *testing.T) {
- kit.QuietMiningLogs()
-
- ctx := context.Background()
- blockTime := 1 * time.Millisecond
-
- client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15), kit.ThroughRPC())
- ens.InterconnectAll().BeginMiningMustPost(blockTime)
-
- maddr, err := miner.ActorAddress(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner)
- fmt.Printf("CCUpgrade: %d\n", CCUpgrade)
-
- miner.PledgeSectors(ctx, 1, 0, nil)
- sl, err := miner.SectorsListNonGenesis(ctx)
- require.NoError(t, err)
- require.Len(t, sl, 1, "expected 1 sector")
- require.Equal(t, CCUpgrade, sl[0], "unexpected sector number")
- {
- si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK)
- require.NoError(t, err)
- require.NotNil(t, si)
- require.Less(t, 50000, int(si.Expiration))
- }
- client.WaitForSectorActive(ctx, t, CCUpgrade, maddr)
-
- err = miner.SectorMarkForUpgrade(ctx, sl[0], true)
- require.NoError(t, err)
-
- sl, err = miner.SectorsListNonGenesis(ctx)
- require.NoError(t, err)
- require.Len(t, sl, 1, "expected 1 sector")
-
- ss, err := miner.SectorsStatus(ctx, sl[0], false)
- require.NoError(t, err)
-
- for i := 0; i < 100; i++ {
- ss, err = miner.SectorsStatus(ctx, sl[0], false)
- require.NoError(t, err)
- if ss.State == api.SectorState(sealing.Proving) {
- time.Sleep(50 * time.Millisecond)
- continue
- }
-
- require.Equal(t, api.SectorState(sealing.Available), ss.State)
- break
- }
-
- require.NoError(t, miner.SectorAbortUpgrade(ctx, sl[0]))
-
- for i := 0; i < 100; i++ {
- ss, err = miner.SectorsStatus(ctx, sl[0], false)
- require.NoError(t, err)
- if ss.State == api.SectorState(sealing.Available) {
- time.Sleep(50 * time.Millisecond)
- continue
- }
-
- require.Equal(t, api.SectorState(sealing.Proving), ss.State)
- break
- }
-}
diff --git a/itests/tape_test.go b/itests/tape_test.go
deleted file mode 100644
index e0db4882c..000000000
--- a/itests/tape_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// stm: #integration
-package itests
-
-import (
- "context"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- "github.com/filecoin-project/go-state-types/network"
-
- "github.com/filecoin-project/lotus/api"
- "github.com/filecoin-project/lotus/build"
- "github.com/filecoin-project/lotus/itests/kit"
- sealing "github.com/filecoin-project/lotus/storage/pipeline"
-)
-
-func TestTapeFix(t *testing.T) {
- //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
- //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
- //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
- //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
-
- //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
- kit.QuietMiningLogs()
-
- var blocktime = 2 * time.Millisecond
-
- // The "before" case is disabled, because we need the builder to mock 32 GiB sectors to accurately repro this case
- // TODO: Make the mock sector size configurable and reenable this
- // t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) })
- t.Run("after", func(t *testing.T) { testTapeFix(t, blocktime, true) })
-}
-
-func testTapeFix(t *testing.T, blocktime time.Duration, after bool) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- networkVersion := network.Version4
- if after {
- networkVersion = network.Version5
- }
-
- _, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(networkVersion))
- ens.InterconnectAll().BeginMining(blocktime)
-
- sid, err := miner.PledgeSector(ctx)
- require.NoError(t, err)
-
- t.Log("All sectors is fsm")
-
- // If before, we expect the precommit to fail
- successState := api.SectorState(sealing.CommitFailed)
- failureState := api.SectorState(sealing.Proving)
- if after {
- // otherwise, it should succeed.
- successState, failureState = failureState, successState
- }
-
- for {
- st, err := miner.SectorsStatus(ctx, sid.Number, false)
- require.NoError(t, err)
- if st.State == successState {
- break
- }
- require.NotEqual(t, failureState, st.State)
- build.Clock.Sleep(100 * time.Millisecond)
- t.Log("WaitSeal")
- }
-}
diff --git a/itests/verifreg_test.go b/itests/verifreg_test.go
index e942d6c71..ffe50c72b 100644
--- a/itests/verifreg_test.go
+++ b/itests/verifreg_test.go
@@ -9,16 +9,20 @@ import (
"testing"
"time"
+ "github.com/ipfs/go-cid"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/builtin"
+ datacap2 "github.com/filecoin-project/go-state-types/builtin/v9/datacap"
verifregst "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/network"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/actors/builtin/datacap"
"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet/key"
@@ -225,36 +229,8 @@ func TestRemoveDataCap(t *testing.T) {
// make the 2 verifiers
- makeVerifier := func(addr address.Address) error {
- allowance := big.NewInt(100000000000)
- params, aerr := actors.SerializeParams(&verifregst.AddVerifierParams{Address: addr, Allowance: allowance})
- require.NoError(t, aerr)
-
- msg := &types.Message{
- From: rootAddr,
- To: verifreg.Address,
- Method: verifreg.Methods.AddVerifier,
- Params: params,
- Value: big.Zero(),
- }
-
- sm, err := api.MpoolPushMessage(ctx, msg, nil)
- require.NoError(t, err, "AddVerifier failed")
-
- //stm: @CHAIN_STATE_WAIT_MSG_001
- res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
- require.NoError(t, err)
- require.EqualValues(t, 0, res.Receipt.ExitCode)
-
- verifierAllowance, err := api.StateVerifierStatus(ctx, addr, types.EmptyTSK)
- require.NoError(t, err)
- require.Equal(t, allowance, *verifierAllowance)
-
- return nil
- }
-
- require.NoError(t, makeVerifier(verifier1Addr))
- require.NoError(t, makeVerifier(verifier2Addr))
+ makeVerifier(ctx, t, api, rootAddr, verifier1Addr)
+ makeVerifier(ctx, t, api, rootAddr, verifier2Addr)
// assign datacap to a client
datacap := big.NewInt(10000)
@@ -374,3 +350,156 @@ func TestRemoveDataCap(t *testing.T) {
require.NoError(t, err)
require.Nil(t, dcap, "expected datacap to be nil")
}
+
+func TestVerifiedClientCanCreateAllocation(t *testing.T) {
+ blockTime := 100 * time.Millisecond
+
+ rootKey, err := key.GenerateKey(types.KTSecp256k1)
+ require.NoError(t, err)
+
+ verifier1Key, err := key.GenerateKey(types.KTSecp256k1)
+ require.NoError(t, err)
+
+ verifiedClientKey, err := key.GenerateKey(types.KTBLS)
+ require.NoError(t, err)
+
+ bal, err := types.ParseFIL("100fil")
+ require.NoError(t, err)
+
+ node, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(),
+ kit.RootVerifier(rootKey, abi.NewTokenAmount(bal.Int64())),
+ kit.Account(verifier1Key, abi.NewTokenAmount(bal.Int64())),
+ kit.Account(verifiedClientKey, abi.NewTokenAmount(bal.Int64())),
+ )
+
+ ens.InterconnectAll().BeginMining(blockTime)
+
+ api := node.FullNode.(*impl.FullNodeAPI)
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // get VRH
+ vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{})
+ fmt.Println(vrh.String())
+ require.NoError(t, err)
+
+ // import the root key.
+ rootAddr, err := api.WalletImport(ctx, &rootKey.KeyInfo)
+ require.NoError(t, err)
+
+ // import the verifiers' keys.
+ verifier1Addr, err := api.WalletImport(ctx, &verifier1Key.KeyInfo)
+ require.NoError(t, err)
+
+ // import the verified client's key.
+ verifiedClientAddr, err := api.WalletImport(ctx, &verifiedClientKey.KeyInfo)
+ require.NoError(t, err)
+
+ // resolve all keys
+
+ // make the 2 verifiers
+
+ makeVerifier(ctx, t, api, rootAddr, verifier1Addr)
+
+ // assign datacap to a client
+ initialDatacap := big.NewInt(10000)
+
+ params, err := actors.SerializeParams(&verifregst.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: initialDatacap})
+ require.NoError(t, err)
+
+ msg := &types.Message{
+ From: verifier1Addr,
+ To: verifreg.Address,
+ Method: verifreg.Methods.AddVerifiedClient,
+ Params: params,
+ Value: big.Zero(),
+ }
+
+ sm, err := api.MpoolPushMessage(ctx, msg, nil)
+ require.NoError(t, err)
+
+ res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
+ require.NoError(t, err)
+ require.EqualValues(t, 0, res.Receipt.ExitCode)
+
+ // check datacap balance
+ dcap, err := api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK)
+ require.NoError(t, err)
+ require.Equal(t, *dcap, initialDatacap)
+
+ minerId, err := address.IDFromAddress(miner.ActorAddr)
+ require.NoError(t, err)
+
+ allocationRequest := verifregst.AllocationRequest{
+ Provider: abi.ActorID(minerId),
+ Data: cid.MustParse("bafkqaaa"),
+ Size: abi.PaddedPieceSize(initialDatacap.Uint64()),
+ TermMin: verifregst.MinimumVerifiedAllocationTerm,
+ TermMax: verifregst.MinimumVerifiedAllocationTerm,
+ Expiration: verifregst.MaximumVerifiedAllocationExpiration,
+ }
+
+ allocationRequests := verifregst.AllocationRequests{
+ Allocations: []verifregst.AllocationRequest{allocationRequest},
+ }
+
+ receiverParams, err := actors.SerializeParams(&allocationRequests)
+ require.NoError(t, err)
+
+ transferParams, err := actors.SerializeParams(&datacap2.TransferParams{
+ To: builtin.VerifiedRegistryActorAddr,
+ Amount: big.Mul(initialDatacap, builtin.TokenPrecision),
+ OperatorData: receiverParams,
+ })
+ require.NoError(t, err)
+
+ msg = &types.Message{
+ To: builtin.DatacapActorAddr,
+ From: verifiedClientAddr,
+ Method: datacap.Methods.TransferExported,
+ Params: transferParams,
+ Value: big.Zero(),
+ }
+
+ sm, err = api.MpoolPushMessage(ctx, msg, nil)
+ require.NoError(t, err)
+
+ res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
+ require.NoError(t, err)
+ require.EqualValues(t, 0, res.Receipt.ExitCode)
+
+ // check datacap balance
+ dcap, err = api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK)
+ require.NoError(t, err)
+ require.Nil(t, dcap)
+
+ allocations, err := api.StateGetAllocations(ctx, verifiedClientAddr, types.EmptyTSK)
+ require.NoError(t, err)
+
+ require.Equal(t, 1, len(allocations))
+}
+
+func makeVerifier(ctx context.Context, t *testing.T, api *impl.FullNodeAPI, rootAddr address.Address, addr address.Address) {
+ allowance := big.NewInt(100000000000)
+ params, aerr := actors.SerializeParams(&verifregst.AddVerifierParams{Address: addr, Allowance: allowance})
+ require.NoError(t, aerr)
+
+ msg := &types.Message{
+ From: rootAddr,
+ To: verifreg.Address,
+ Method: verifreg.Methods.AddVerifier,
+ Params: params,
+ Value: big.Zero(),
+ }
+
+ sm, err := api.MpoolPushMessage(ctx, msg, nil)
+ require.NoError(t, err, "AddVerifier failed")
+
+ res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
+ require.NoError(t, err)
+ require.EqualValues(t, 0, res.Receipt.ExitCode)
+
+ verifierAllowance, err := api.StateVerifierStatus(ctx, addr, types.EmptyTSK)
+ require.NoError(t, err)
+ require.Equal(t, allowance, *verifierAllowance)
+}
diff --git a/itests/wdpost_test.go b/itests/wdpost_test.go
index 08c9d4343..2a6fc866e 100644
--- a/itests/wdpost_test.go
+++ b/itests/wdpost_test.go
@@ -224,70 +224,6 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int,
require.Equal(t, nSectors+kit.DefaultPresealsPerBootstrapMiner-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged
}
-func TestWindowPostBaseFeeNoBurn(t *testing.T) {
- //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
- //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
- //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
- //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
-
- //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
- kit.Expensive(t)
-
- kit.QuietMiningLogs()
-
- var (
- blocktime = 2 * time.Millisecond
- nSectors = 10
- )
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- och := build.UpgradeClausHeight
- build.UpgradeClausHeight = 0
- t.Cleanup(func() { build.UpgradeClausHeight = och })
-
- client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(network.Version9))
- ens.InterconnectAll().BeginMining(blocktime)
-
- maddr, err := miner.ActorAddress(ctx)
- require.NoError(t, err)
-
- //stm: @CHAIN_STATE_MINER_INFO_001
- mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
- require.NoError(t, err)
-
- miner.PledgeSectors(ctx, nSectors, 0, nil)
- //stm: @CHAIN_STATE_GET_ACTOR_001
- wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
- require.NoError(t, err)
- en := wact.Nonce
-
- // wait for a new message to be sent from worker address, it will be a PoSt
-
-waitForProof:
- for {
- //stm: @CHAIN_STATE_GET_ACTOR_001
- wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
- require.NoError(t, err)
- if wact.Nonce > en {
- break waitForProof
- }
-
- build.Clock.Sleep(blocktime)
- }
-
- //stm: @CHAIN_STATE_LIST_MESSAGES_001
- slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
- require.NoError(t, err)
-
- //stm: @CHAIN_STATE_REPLAY_001
- pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0])
- require.NoError(t, err)
-
- require.Equal(t, pmr.GasCost.BaseFeeBurn, big.Zero())
-}
-
func TestWindowPostBaseFeeBurn(t *testing.T) {
//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
@@ -345,79 +281,6 @@ waitForProof:
require.NotEqual(t, pmr.GasCost.BaseFeeBurn, big.Zero())
}
-// Tests that V1_1 proofs are generated and accepted in nv19, and V1 proofs are accepted
-func TestWindowPostV1P1NV19(t *testing.T) {
- kit.QuietMiningLogs()
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- blocktime := 2 * time.Millisecond
-
- client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version19))
- ens.InterconnectAll().BeginMining(blocktime)
-
- maddr, err := miner.ActorAddress(ctx)
- require.NoError(t, err)
-
- mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK)
- require.NoError(t, err)
-
- wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
- require.NoError(t, err)
- en := wact.Nonce
-
- // wait for a new message to be sent from worker address, it will be a PoSt
-
-waitForProof:
- for {
- wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK)
- require.NoError(t, err)
- if wact.Nonce > en {
- break waitForProof
- }
-
- build.Clock.Sleep(blocktime)
- }
-
- slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0)
- require.NoError(t, err)
-
- pmr, err := client.StateSearchMsg(ctx, types.EmptyTSK, slm[0], -1, false)
- require.NoError(t, err)
-
- inclTs, err := client.ChainGetTipSet(ctx, pmr.TipSet)
- require.NoError(t, err)
-
- inclTsParents, err := client.ChainGetTipSet(ctx, inclTs.Parents())
- require.NoError(t, err)
-
- nv, err := client.StateNetworkVersion(ctx, pmr.TipSet)
- require.NoError(t, err)
- require.Equal(t, network.Version19, nv)
-
- require.True(t, pmr.Receipt.ExitCode.IsSuccess())
-
- slmsg, err := client.ChainGetMessage(ctx, slm[0])
- require.NoError(t, err)
-
- var params miner11.SubmitWindowedPoStParams
- require.NoError(t, params.UnmarshalCBOR(bytes.NewBuffer(slmsg.Params)))
- require.Equal(t, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1, params.Proofs[0].PoStProof)
-
- // "Turn" this into a V1 proof -- the proof will be invalid, but won't be validated, and so the call should succeed
- params.Proofs[0].PoStProof = abi.RegisteredPoStProof_StackedDrgWindow2KiBV1
- v1PostParams := new(bytes.Buffer)
- require.NoError(t, params.MarshalCBOR(v1PostParams))
-
- slmsg.Params = v1PostParams.Bytes()
-
- // Simulate call on inclTsParents's parents, so that the partition isn't already proven
- call, err := client.StateCall(ctx, slmsg, inclTsParents.Parents())
- require.NoError(t, err)
- require.True(t, call.MsgRct.ExitCode.IsSuccess())
-}
-
// Tests that V1_1 proofs are generated and accepted in nv20, and that V1 proofs are NOT
func TestWindowPostV1P1NV20(t *testing.T) {
kit.QuietMiningLogs()
@@ -484,8 +347,9 @@ waitForProof:
slmsg.Params = v1PostParams.Bytes()
// Simulate call on inclTs's parents, so that the partition isn't already proven
- _, err = client.StateCall(ctx, slmsg, inclTs.Parents())
- require.ErrorContains(t, err, "expected proof of type StackedDRGWindow2KiBV1P1, got StackedDRGWindow2KiBV1")
+ ret, err := client.StateCall(ctx, slmsg, inclTs.Parents())
+ require.NoError(t, err)
+ require.Contains(t, ret.Error, "expected proof of type StackedDRGWindow2KiBV1P1, got StackedDRGWindow2KiBV1")
for {
//stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001
diff --git a/itests/worker_test.go b/itests/worker_test.go
index 246c842c5..c4f885fb0 100644
--- a/itests/worker_test.go
+++ b/itests/worker_test.go
@@ -730,3 +730,82 @@ waitForProof:
require.NoError(t, params.UnmarshalCBOR(bytes.NewBuffer(slmsg.Params)))
require.Equal(t, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1, params.Proofs[0].PoStProof)
}
+
+func TestWorkerPledgeExpireCommit(t *testing.T) {
+ kit.QuietMiningLogs()
+ _ = logging.SetLogLevel("sectors", "debug")
+
+ var tasksNoC2 = kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTAddPiece, sealtasks.TTDataCid, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2,
+ sealtasks.TTUnseal, sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFinalizeUnsealed})
+
+ fc := config.DefaultStorageMiner().Fees
+ fc.MaxCommitGasFee = types.FIL(abi.NewTokenAmount(10000)) // 10000 attofil, way too low for anything to land
+
+ ctx := context.Background()
+ client, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithNoLocalSealing(true),
+ kit.MutateSealingConfig(func(sc *config.SealingConfig) {
+ sc.AggregateCommits = true
+ }),
+ kit.ConstructorOpts(
+ node.Override(new(*sealing.Sealing), modules.SealingPipeline(fc)),
+ ),
+ kit.SplitstoreDisable(), // disable splitstore because messages which take a long time may get dropped
+ tasksNoC2) // no mock proofs
+
+ ens.InterconnectAll().BeginMiningMustPost(2 * time.Millisecond)
+
+ e, err := worker.Enabled(ctx)
+ require.NoError(t, err)
+ require.True(t, e)
+
+ dh := kit.NewDealHarness(t, client, miner, miner)
+
+ startEpoch := abi.ChainEpoch(4 << 10)
+
+ dh.StartRandomDeal(ctx, kit.MakeFullDealParams{
+ Rseed: 7,
+ StartEpoch: startEpoch,
+ })
+
+ var sn abi.SectorNumber
+
+ require.Eventually(t, func() bool {
+ s, err := miner.SectorsListNonGenesis(ctx)
+ require.NoError(t, err)
+ if len(s) == 0 {
+ return false
+ }
+ if len(s) > 1 {
+ t.Fatalf("expected 1 sector, got %d", len(s))
+ }
+ sn = s[0]
+ return true
+ }, 30*time.Second, 1*time.Second)
+
+ t.Log("sector", sn)
+
+ t.Log("sector committing")
+
+ // wait until after startEpoch
+ client.WaitTillChain(ctx, kit.HeightAtLeast(startEpoch+20))
+
+ t.Log("after start")
+
+ sstate, err := miner.SectorsStatus(ctx, sn, false)
+ require.NoError(t, err)
+ require.Equal(t, api.SectorState(sealing.SubmitCommitAggregate), sstate.State)
+
+ _, err = miner.SectorCommitFlush(ctx)
+ require.NoError(t, err)
+
+ require.Eventually(t, func() bool {
+ sstate, err := miner.SectorsStatus(ctx, sn, false)
+ require.NoError(t, err)
+
+ t.Logf("sector state: %s", sstate.State)
+
+ return sstate.State == api.SectorState(sealing.Removed)
+ }, 30*time.Second, 1*time.Second)
+
+ t.Log("sector removed")
+}
diff --git a/lib/consensus/raft/raft.go b/lib/consensus/raft/raft.go
index 8ff93caf2..8541e6f87 100644
--- a/lib/consensus/raft/raft.go
+++ b/lib/consensus/raft/raft.go
@@ -561,33 +561,3 @@ func find(s []string, elem string) bool {
}
return false
}
-
-func (rw *raftWrapper) observePeers() {
- obsCh := make(chan hraft.Observation, 1)
- defer close(obsCh)
-
- observer := hraft.NewObserver(obsCh, true, func(o *hraft.Observation) bool {
- po, ok := o.Data.(hraft.PeerObservation)
- return ok && po.Removed
- })
-
- rw.raft.RegisterObserver(observer)
- defer rw.raft.DeregisterObserver(observer)
-
- for {
- select {
- case obs := <-obsCh:
- pObs := obs.Data.(hraft.PeerObservation)
- logger.Info("raft peer departed. Removing from peerstore: ", pObs.Peer.ID)
- pID, err := peer.Decode(string(pObs.Peer.ID))
- if err != nil {
- logger.Error(err)
- continue
- }
- rw.host.Peerstore().ClearAddrs(pID)
- case <-rw.ctx.Done():
- logger.Debug("stopped observing raft peers")
- return
- }
- }
-}
diff --git a/lib/harmony/harmonytask/doc.go b/lib/harmony/harmonytask/doc.go
index 772b674cd..9eaace914 100644
--- a/lib/harmony/harmonytask/doc.go
+++ b/lib/harmony/harmonytask/doc.go
@@ -32,12 +32,12 @@ Mental Model:
*
To use:
1.Implement TaskInterface for a new task.
-2 Have New() receive this & all other ACTIVE implementations.
+2. Have New() receive this & all other ACTIVE implementations.
*
*
As we are not expecting DBAs in this database, it's important to know
-what grows uncontrolled. The only harmony_* table is _task_history
-(somewhat quickly) and harmony_machines (slowly). These will need a
+what grows uncontrolled. The only growing harmony_* table is
+harmony_task_history (somewhat quickly). These will need a
clean-up for after the task data could never be acted upon.
but the design **requires** extraInfo tables to grow until the task's
info could not possibly be used by a following task, including slow
diff --git a/lib/shardedmutex/shardedmutex.go b/lib/shardedmutex/shardedmutex.go
index 922ac3994..4d1c11bdf 100644
--- a/lib/shardedmutex/shardedmutex.go
+++ b/lib/shardedmutex/shardedmutex.go
@@ -11,8 +11,8 @@ const cacheline = 64
// name old time/op new time/op delta
// Locks-8 74.6ns ± 7% 12.3ns ± 2% -83.54% (p=0.000 n=20+18)
type paddedMutex struct {
- mt sync.Mutex
- pad [cacheline - 8]uint8
+ mt sync.Mutex
+ _ [cacheline - 8]uint8
}
type ShardedMutex struct {
diff --git a/markets/dagstore/wrapper.go b/markets/dagstore/wrapper.go
index b5813dc5e..a929ad1fc 100644
--- a/markets/dagstore/wrapper.go
+++ b/markets/dagstore/wrapper.go
@@ -48,7 +48,6 @@ type Wrapper struct {
dagst dagstore.Interface
minerAPI MinerAPI
failureCh chan dagstore.ShardResult
- traceCh chan dagstore.Trace
gcInterval time.Duration
}
@@ -64,9 +63,6 @@ func NewDAGStore(cfg config.DAGStoreConfig, minerApi MinerAPI, h host.Host) (*da
// The dagstore will write Shard failures to the `failureCh` here.
failureCh := make(chan dagstore.ShardResult, 1)
- // The dagstore will write Trace events to the `traceCh` here.
- traceCh := make(chan dagstore.Trace, 32)
-
var (
transientsDir = filepath.Join(cfg.RootDir, "transients")
datastoreDir = filepath.Join(cfg.RootDir, "datastore")
@@ -90,7 +86,6 @@ func NewDAGStore(cfg config.DAGStoreConfig, minerApi MinerAPI, h host.Host) (*da
Datastore: dstore,
MountRegistry: registry,
FailureCh: failureCh,
- TraceCh: traceCh,
TopLevelIndex: topIndex,
// not limiting fetches globally, as the Lotus mount does
// conditional throttling.
@@ -109,7 +104,6 @@ func NewDAGStore(cfg config.DAGStoreConfig, minerApi MinerAPI, h host.Host) (*da
dagst: dagst,
minerAPI: minerApi,
failureCh: failureCh,
- traceCh: traceCh,
gcInterval: time.Duration(cfg.GCInterval),
}
@@ -146,10 +140,6 @@ func (w *Wrapper) Start(ctx context.Context) error {
w.backgroundWg.Add(1)
go w.gcLoop()
- // run a go-routine to read the trace for debugging.
- w.backgroundWg.Add(1)
- go w.traceLoop()
-
// Run a go-routine for shard recovery
if dss, ok := w.dagst.(*dagstore.DAGStore); ok {
w.backgroundWg.Add(1)
@@ -159,24 +149,6 @@ func (w *Wrapper) Start(ctx context.Context) error {
return w.dagst.Start(ctx)
}
-func (w *Wrapper) traceLoop() {
- defer w.backgroundWg.Done()
-
- for w.ctx.Err() == nil {
- select {
- // Log trace events from the DAG store
- case tr := <-w.traceCh:
- log.Debugw("trace",
- "shard-key", tr.Key.String(),
- "op-type", tr.Op.String(),
- "after", tr.After.String())
-
- case <-w.ctx.Done():
- return
- }
- }
-}
-
func (w *Wrapper) gcLoop() {
defer w.backgroundWg.Done()
diff --git a/markets/storageadapter/ondealsectorcommitted.go b/markets/storageadapter/ondealsectorcommitted.go
index a8b22c62a..54ddb73b3 100644
--- a/markets/storageadapter/ondealsectorcommitted.go
+++ b/markets/storageadapter/ondealsectorcommitted.go
@@ -13,6 +13,7 @@ import (
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin"
+ miner2 "github.com/filecoin-project/go-state-types/builtin/v11/miner"
"github.com/filecoin-project/go-state-types/builtin/v8/miner"
"github.com/filecoin-project/go-state-types/builtin/v9/market"
@@ -107,7 +108,10 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context,
// Watch for a pre-commit message to the provider.
matchEvent := func(msg *types.Message) (bool, error) {
- matched := msg.To == provider && (msg.Method == builtin.MethodsMiner.PreCommitSector || msg.Method == builtin.MethodsMiner.PreCommitSectorBatch || msg.Method == builtin.MethodsMiner.ProveReplicaUpdates)
+ matched := msg.To == provider && (msg.Method == builtin.MethodsMiner.PreCommitSector ||
+ msg.Method == builtin.MethodsMiner.PreCommitSectorBatch ||
+ msg.Method == builtin.MethodsMiner.PreCommitSectorBatch2 ||
+ msg.Method == builtin.MethodsMiner.ProveReplicaUpdates)
return matched, nil
}
@@ -333,6 +337,21 @@ func dealSectorInPreCommitMsg(msg *types.Message, res pipeline.CurrentDealInfo)
return nil, xerrors.Errorf("unmarshal pre commit: %w", err)
}
+ for _, precommit := range params.Sectors {
+ // Check through the deal IDs associated with this message
+ for _, did := range precommit.DealIDs {
+ if did == res.DealID {
+ // Found the deal ID in this message. Callback with the sector ID.
+ return &precommit.SectorNumber, nil
+ }
+ }
+ }
+ case builtin.MethodsMiner.PreCommitSectorBatch2:
+ var params miner2.PreCommitSectorBatchParams2
+ if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
+ return nil, xerrors.Errorf("unmarshal pre commit: %w", err)
+ }
+
for _, precommit := range params.Sectors {
// Check through the deal IDs associated with this message
for _, did := range precommit.DealIDs {
diff --git a/metrics/metrics.go b/metrics/metrics.go
index b1c241b21..50b47ad69 100644
--- a/metrics/metrics.go
+++ b/metrics/metrics.go
@@ -116,6 +116,7 @@ var (
PubsubDeliverMessage = stats.Int64("pubsub/delivered", "Counter for total delivered messages", stats.UnitDimensionless)
PubsubRejectMessage = stats.Int64("pubsub/rejected", "Counter for total rejected messages", stats.UnitDimensionless)
PubsubDuplicateMessage = stats.Int64("pubsub/duplicate", "Counter for total duplicate messages", stats.UnitDimensionless)
+ PubsubPruneMessage = stats.Int64("pubsub/prune", "Counter for total prune messages", stats.UnitDimensionless)
PubsubRecvRPC = stats.Int64("pubsub/recv_rpc", "Counter for total received RPCs", stats.UnitDimensionless)
PubsubSendRPC = stats.Int64("pubsub/send_rpc", "Counter for total sent RPCs", stats.UnitDimensionless)
PubsubDropRPC = stats.Int64("pubsub/drop_rpc", "Counter for total dropped RPCs", stats.UnitDimensionless)
@@ -326,6 +327,10 @@ var (
Measure: PubsubDuplicateMessage,
Aggregation: view.Count(),
}
+ PubsubPruneMessageView = &view.View{
+ Measure: PubsubPruneMessage,
+ Aggregation: view.Count(),
+ }
PubsubRecvRPCView = &view.View{
Measure: PubsubRecvRPC,
Aggregation: view.Count(),
@@ -769,6 +774,7 @@ var ChainNodeViews = append([]*view.View{
PubsubDeliverMessageView,
PubsubRejectMessageView,
PubsubDuplicateMessageView,
+ PubsubPruneMessageView,
PubsubRecvRPCView,
PubsubSendRPCView,
PubsubDropRPCView,
diff --git a/miner/miner.go b/miner/miner.go
index e1737009b..d1dee1ec9 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -10,7 +10,8 @@ import (
"sync"
"time"
- lru "github.com/hashicorp/golang-lru/v2"
+ "github.com/hashicorp/golang-lru/arc/v2"
+ "github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
@@ -61,7 +62,7 @@ func randTimeOffset(width time.Duration) time.Duration {
// NewMiner instantiates a miner with a concrete WinningPoStProver and a miner
// address (which can be different from the worker's address).
func NewMiner(api v1api.FullNode, epp gen.WinningPoStProver, addr address.Address, sf *slashfilter.SlashFilter, j journal.Journal) *Miner {
- arc, err := lru.NewARC[abi.ChainEpoch, bool](10000)
+ arc, err := arc.NewARC[abi.ChainEpoch, bool](10000)
if err != nil {
panic(err)
}
@@ -122,7 +123,7 @@ type Miner struct {
// minedBlockHeights is a safeguard that caches the last heights we mined.
// It is consulted before publishing a newly mined block, for a sanity check
// intended to avoid slashings in case of a bug.
- minedBlockHeights *lru.ARCCache[abi.ChainEpoch, bool]
+ minedBlockHeights *arc.ARCCache[abi.ChainEpoch, bool]
evtTypes [1]journal.EventType
journal journal.Journal
@@ -324,9 +325,16 @@ minerLoop:
"block-time", btime, "time", build.Clock.Now(), "difference", build.Clock.Since(btime))
}
- if _, err = m.sf.MinedBlock(ctx, b.Header, base.TipSet.Height()+base.NullRounds); err != nil {
- log.Errorf(" SLASH FILTER ERROR: %s", err)
- if os.Getenv("LOTUS_MINER_NO_SLASHFILTER") != "_yes_i_know_i_can_and_probably_will_lose_all_my_fil_and_power_" {
+ if os.Getenv("LOTUS_MINER_NO_SLASHFILTER") != "_yes_i_know_i_can_and_probably_will_lose_all_my_fil_and_power_" {
+ witness, fault, err := m.sf.MinedBlock(ctx, b.Header, base.TipSet.Height()+base.NullRounds)
+ if err != nil {
+ log.Errorf(" SLASH FILTER ERRORED: %s", err)
+ // Continue here, because it's _probably_ wiser to not submit this block
+ continue
+ }
+
+ if fault {
+ log.Errorf(" SLASH FILTER DETECTED FAULT due to blocks %s and %s", b.Header.Cid(), witness)
continue
}
}
@@ -366,8 +374,9 @@ minerLoop:
// MiningBase is the tipset on top of which we plan to construct our next block.
// Refer to godocs on GetBestMiningCandidate.
type MiningBase struct {
- TipSet *types.TipSet
- NullRounds abi.ChainEpoch
+ TipSet *types.TipSet
+ ComputeTime time.Time
+ NullRounds abi.ChainEpoch
}
// GetBestMiningCandidate implements the fork choice rule from a miner's
@@ -405,7 +414,7 @@ func (m *Miner) GetBestMiningCandidate(ctx context.Context) (*MiningBase, error)
}
}
- m.lastWork = &MiningBase{TipSet: bts}
+ m.lastWork = &MiningBase{TipSet: bts, ComputeTime: time.Now()}
return m.lastWork, nil
}
@@ -500,13 +509,13 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
rbase = bvals[len(bvals)-1]
}
- ticket, err := m.computeTicket(ctx, &rbase, base, mbi)
+ ticket, err := m.computeTicket(ctx, &rbase, round, base.TipSet.MinTicket(), mbi)
if err != nil {
err = xerrors.Errorf("scratching ticket failed: %w", err)
return nil, err
}
- winner, err = gen.IsRoundWinner(ctx, base.TipSet, round, m.address, rbase, mbi, m.api)
+ winner, err = gen.IsRoundWinner(ctx, round, m.address, rbase, mbi, m.api)
if err != nil {
err = xerrors.Errorf("failed to check if we win next round: %w", err)
return nil, err
@@ -524,7 +533,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
return nil, err
}
- rand, err := lrand.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, round, buf.Bytes())
+ rand, err := lrand.DrawRandomnessFromBase(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, round, buf.Bytes())
if err != nil {
err = xerrors.Errorf("failed to get randomness for winning post: %w", err)
return nil, err
@@ -547,7 +556,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
tProof := build.Clock.Now()
// get pending messages early,
- msgs, err := m.api.MpoolSelect(context.TODO(), base.TipSet.Key(), ticket.Quality())
+ msgs, err := m.api.MpoolSelect(ctx, base.TipSet.Key(), ticket.Quality())
if err != nil {
err = xerrors.Errorf("failed to select messages for block: %w", err)
return nil, err
@@ -555,6 +564,67 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
tPending := build.Clock.Now()
+ // This next block exists to "catch" equivocating miners,
+ // who submit 2 blocks at the same height at different times in order to split the network.
+ // To safeguard against this, we make sure it's been EquivocationDelaySecs since our base was calculated,
+ // then re-calculate it.
+ // If the daemon detected equivocated blocks, those blocks will no longer be in the new base.
+ m.niceSleep(time.Until(base.ComputeTime.Add(time.Duration(build.EquivocationDelaySecs) * time.Second)))
+ newBase, err := m.GetBestMiningCandidate(ctx)
+ if err != nil {
+ err = xerrors.Errorf("failed to refresh best mining candidate: %w", err)
+ return nil, err
+ }
+
+ tEquivocateWait := build.Clock.Now()
+
+ // If the base has changed, we take the _intersection_ of our old base and new base,
+ // thus ejecting blocks from any equivocating miners, without taking any new blocks.
+ if newBase.TipSet.Height() == base.TipSet.Height() && !newBase.TipSet.Equals(base.TipSet) {
+ log.Warnf("base changed from %s to %s, taking intersection", base.TipSet.Key(), newBase.TipSet.Key())
+ newBaseMap := map[cid.Cid]struct{}{}
+ for _, newBaseBlk := range newBase.TipSet.Cids() {
+ newBaseMap[newBaseBlk] = struct{}{}
+ }
+
+ refreshedBaseBlocks := make([]*types.BlockHeader, 0, len(base.TipSet.Cids()))
+ for _, baseBlk := range base.TipSet.Blocks() {
+ if _, ok := newBaseMap[baseBlk.Cid()]; ok {
+ refreshedBaseBlocks = append(refreshedBaseBlocks, baseBlk)
+ }
+ }
+
+ if len(refreshedBaseBlocks) != len(base.TipSet.Blocks()) {
+ refreshedBase, err := types.NewTipSet(refreshedBaseBlocks)
+ if err != nil {
+ err = xerrors.Errorf("failed to create new tipset when refreshing: %w", err)
+ return nil, err
+ }
+
+ if !base.TipSet.MinTicket().Equals(refreshedBase.MinTicket()) {
+ log.Warn("recomputing ticket due to base refresh")
+
+ ticket, err = m.computeTicket(ctx, &rbase, round, refreshedBase.MinTicket(), mbi)
+ if err != nil {
+ err = xerrors.Errorf("failed to refresh ticket: %w", err)
+ return nil, err
+ }
+ }
+
+ log.Warn("re-selecting messages due to base refresh")
+ // refresh messages, as the selected messages may no longer be valid
+ msgs, err = m.api.MpoolSelect(ctx, refreshedBase.Key(), ticket.Quality())
+ if err != nil {
+ err = xerrors.Errorf("failed to re-select messages for block: %w", err)
+ return nil, err
+ }
+
+ base.TipSet = refreshedBase
+ }
+ }
+
+ tIntersectAndRefresh := build.Clock.Now()
+
// TODO: winning post proof
minedBlock, err = m.createBlock(base, m.address, ticket, winner, bvals, postProof, msgs)
if err != nil {
@@ -569,31 +639,32 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type
parentMiners[i] = header.Miner
}
log.Infow("mined new block", "cid", minedBlock.Cid(), "height", int64(minedBlock.Header.Height), "miner", minedBlock.Header.Miner, "parents", parentMiners, "parentTipset", base.TipSet.Key().String(), "took", dur)
- if dur > time.Second*time.Duration(build.BlockDelaySecs) {
- log.Warnw("CAUTION: block production took longer than the block delay. Your computer may not be fast enough to keep up",
+ if dur > time.Second*time.Duration(build.BlockDelaySecs) || time.Now().Compare(time.Unix(int64(minedBlock.Header.Timestamp), 0)) >= 0 {
+ log.Warnw("CAUTION: block production took us past the block time. Your computer may not be fast enough to keep up",
"tPowercheck ", tPowercheck.Sub(tStart),
"tTicket ", tTicket.Sub(tPowercheck),
"tSeed ", tSeed.Sub(tTicket),
"tProof ", tProof.Sub(tSeed),
"tPending ", tPending.Sub(tProof),
- "tCreateBlock ", tCreateBlock.Sub(tPending))
+ "tEquivocateWait ", tEquivocateWait.Sub(tPending),
+ "tIntersectAndRefresh ", tIntersectAndRefresh.Sub(tEquivocateWait),
+ "tCreateBlock ", tCreateBlock.Sub(tIntersectAndRefresh))
}
return minedBlock, nil
}
-func (m *Miner) computeTicket(ctx context.Context, brand *types.BeaconEntry, base *MiningBase, mbi *api.MiningBaseInfo) (*types.Ticket, error) {
+func (m *Miner) computeTicket(ctx context.Context, brand *types.BeaconEntry, round abi.ChainEpoch, chainRand *types.Ticket, mbi *api.MiningBaseInfo) (*types.Ticket, error) {
buf := new(bytes.Buffer)
if err := m.address.MarshalCBOR(buf); err != nil {
return nil, xerrors.Errorf("failed to marshal address to cbor: %w", err)
}
- round := base.TipSet.Height() + base.NullRounds + 1
if round > build.UpgradeSmokeHeight {
- buf.Write(base.TipSet.MinTicket().VRFProof)
+ buf.Write(chainRand.VRFProof)
}
- input, err := lrand.DrawRandomness(brand.Data, crypto.DomainSeparationTag_TicketProduction, round-build.TicketRandomnessLookback, buf.Bytes())
+ input, err := lrand.DrawRandomnessFromBase(brand.Data, crypto.DomainSeparationTag_TicketProduction, round-build.TicketRandomnessLookback, buf.Bytes())
if err != nil {
return nil, err
}
diff --git a/miner/testminer.go b/miner/testminer.go
index deda89f42..f1d11bae0 100644
--- a/miner/testminer.go
+++ b/miner/testminer.go
@@ -3,7 +3,7 @@ package miner
import (
"context"
- lru "github.com/hashicorp/golang-lru/v2"
+ "github.com/hashicorp/golang-lru/arc/v2"
ds "github.com/ipfs/go-datastore"
"github.com/filecoin-project/go-address"
@@ -22,7 +22,7 @@ type MineReq struct {
func NewTestMiner(nextCh <-chan MineReq, addr address.Address) func(v1api.FullNode, gen.WinningPoStProver) *Miner {
return func(api v1api.FullNode, epp gen.WinningPoStProver) *Miner {
- arc, err := lru.NewARC[abi.ChainEpoch, bool](10000)
+ arc, err := arc.NewARC[abi.ChainEpoch, bool](10000)
if err != nil {
panic(err)
}
diff --git a/node/builder.go b/node/builder.go
index e116fd807..e0f4dfe3a 100644
--- a/node/builder.go
+++ b/node/builder.go
@@ -128,6 +128,8 @@ const (
SetupFallbackBlockstoresKey
GoRPCServer
+ ConsensusReporterKey
+
SetApiEndpointKey
StoreEventsKey
diff --git a/node/builder_chain.go b/node/builder_chain.go
index fcdb26162..267659f00 100644
--- a/node/builder_chain.go
+++ b/node/builder_chain.go
@@ -280,6 +280,11 @@ func ConfigFullNode(c interface{}) Option {
// enable message index for full node when configured by the user, otherwise use dummy.
If(cfg.Index.EnableMsgIndex, Override(new(index.MsgIndex), modules.MsgIndex)),
If(!cfg.Index.EnableMsgIndex, Override(new(index.MsgIndex), modules.DummyMsgIndex)),
+
+ // enable fault reporter when configured by the user
+ If(cfg.FaultReporter.EnableConsensusFaultReporter,
+ Override(ConsensusReporterKey, modules.RunConsensusFaultReporter(cfg.FaultReporter)),
+ ),
)
}
diff --git a/node/config/def.go b/node/config/def.go
index aba7e340d..13c1a10aa 100644
--- a/node/config/def.go
+++ b/node/config/def.go
@@ -138,7 +138,6 @@ func DefaultStorageMiner() *StorageMiner {
AvailableBalanceBuffer: types.FIL(big.Zero()),
DisableCollateralFallback: false,
- BatchPreCommits: true,
MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors
PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket
// XXX snap deals wait deals slack if first
diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go
index 9c19d1953..613fd7c49 100644
--- a/node/config/doc_gen.go
+++ b/node/config/doc_gen.go
@@ -394,6 +394,35 @@ the database must already exist and be writeable. If a relative path is provided
relative to the CWD (current working directory).`,
},
},
+ "FaultReporterConfig": []DocField{
+ {
+ Name: "EnableConsensusFaultReporter",
+ Type: "bool",
+
+ Comment: `EnableConsensusFaultReporter controls whether the node will monitor and
+report consensus faults. When enabled, the node will watch for malicious
+behaviors like double-mining and parent grinding, and submit reports to the
+network. This can earn reporter rewards, but is not guaranteed. Nodes should
+enable fault reporting with care, as it may increase resource usage, and may
+generate gas fees without earning rewards.`,
+ },
+ {
+ Name: "ConsensusFaultReporterDataDir",
+ Type: "string",
+
+ Comment: `ConsensusFaultReporterDataDir is the path where fault reporter state will be
+persisted. This directory should have adequate space and permissions for the
+node process.`,
+ },
+ {
+ Name: "ConsensusFaultReporterAddress",
+ Type: "string",
+
+ Comment: `ConsensusFaultReporterAddress is the wallet address used for submitting
+ReportConsensusFault messages. It will pay for gas fees, and receive any
+rewards. This address should have adequate funds to cover gas fees.`,
+ },
+ },
"FeeConfig": []DocField{
{
Name: "DefaultMaxFee",
@@ -465,6 +494,12 @@ Set to 0 to keep all mappings`,
Name: "Index",
Type: "IndexConfig",
+ Comment: ``,
+ },
+ {
+ Name: "FaultReporter",
+ Type: "FaultReporterConfig",
+
Comment: ``,
},
},
@@ -1210,12 +1245,6 @@ This is useful for forcing all deals to be assigned as snap deals to sectors mar
Comment: `Don't send collateral with messages even if there is no available balance in the miner actor`,
},
- {
- Name: "BatchPreCommits",
- Type: "bool",
-
- Comment: `enable / disable precommit batching (takes effect after nv13)`,
- },
{
Name: "MaxPreCommitBatch",
Type: "int",
@@ -1269,7 +1298,8 @@ This is useful for forcing all deals to be assigned as snap deals to sectors mar
Type: "types.FIL",
Comment: `network BaseFee below which to stop doing precommit batching, instead
-sending precommit messages to the chain individually`,
+sending precommit messages to the chain individually. When the basefee is
+below this threshold, precommit messages will get sent out immediately.`,
},
{
Name: "AggregateAboveBaseFee",
diff --git a/node/config/types.go b/node/config/types.go
index 21c92e47b..74d5a22ed 100644
--- a/node/config/types.go
+++ b/node/config/types.go
@@ -22,13 +22,14 @@ type Common struct {
// FullNode is a full node config
type FullNode struct {
Common
- Client Client
- Wallet Wallet
- Fees FeeConfig
- Chainstore Chainstore
- Cluster UserRaftConfig
- Fevm FevmConfig
- Index IndexConfig
+ Client Client
+ Wallet Wallet
+ Fees FeeConfig
+ Chainstore Chainstore
+ Cluster UserRaftConfig
+ Fevm FevmConfig
+ Index IndexConfig
+ FaultReporter FaultReporterConfig
}
// // Common
@@ -393,8 +394,6 @@ type SealingConfig struct {
// Don't send collateral with messages even if there is no available balance in the miner actor
DisableCollateralFallback bool
- // enable / disable precommit batching (takes effect after nv13)
- BatchPreCommits bool
// maximum precommit batch size - batches will be sent immediately above this size
MaxPreCommitBatch int
// how long to wait before submitting a batch after crossing the minimum batch size
@@ -414,7 +413,8 @@ type SealingConfig struct {
CommitBatchSlack Duration
// network BaseFee below which to stop doing precommit batching, instead
- // sending precommit messages to the chain individually
+ // sending precommit messages to the chain individually. When the basefee is
+ // below this threshold, precommit messages will get sent out immediately.
BatchPreCommitAboveBaseFee types.FIL
// network BaseFee below which to stop doing commit aggregation, instead
@@ -757,3 +757,22 @@ type HarmonyDB struct {
// The port to find Yugabyte. Blank for default.
Port string
}
+type FaultReporterConfig struct {
+ // EnableConsensusFaultReporter controls whether the node will monitor and
+ // report consensus faults. When enabled, the node will watch for malicious
+ // behaviors like double-mining and parent grinding, and submit reports to the
+ // network. This can earn reporter rewards, but is not guaranteed. Nodes should
+ // enable fault reporting with care, as it may increase resource usage, and may
+ // generate gas fees without earning rewards.
+ EnableConsensusFaultReporter bool
+
+ // ConsensusFaultReporterDataDir is the path where fault reporter state will be
+ // persisted. This directory should have adequate space and permissions for the
+ // node process.
+ ConsensusFaultReporterDataDir string
+
+ // ConsensusFaultReporterAddress is the wallet address used for submitting
+ // ReportConsensusFault messages. It will pay for gas fees, and receive any
+ // rewards. This address should have adequate funds to cover gas fees.
+ ConsensusFaultReporterAddress string
+}
diff --git a/node/impl/client/client.go b/node/impl/client/client.go
index 3ed4a01a7..fff46acc7 100644
--- a/node/impl/client/client.go
+++ b/node/impl/client/client.go
@@ -527,7 +527,7 @@ func (a *API) ClientImport(ctx context.Context, ref api.FileRef) (res *api.Impor
return nil, xerrors.Errorf("failed to read CAR header: %w", err)
}
if len(hd.Roots) != 1 {
- return nil, xerrors.New("car file can have one and only one header")
+ return nil, xerrors.New("car file can have one and only one root")
}
if hd.Version != 1 && hd.Version != 2 {
return nil, xerrors.Errorf("car version must be 1 or 2, is %d", hd.Version)
diff --git a/node/impl/full/dummy.go b/node/impl/full/dummy.go
index c4bda6428..743eadf34 100644
--- a/node/impl/full/dummy.go
+++ b/node/impl/full/dummy.go
@@ -178,5 +178,13 @@ func (e *EthModuleDummy) EthUnsubscribe(ctx context.Context, id ethtypes.EthSubs
return false, ErrModuleDisabled
}
+func (e *EthModuleDummy) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) {
+ return nil, ErrModuleDisabled
+}
+
+func (e *EthModuleDummy) EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ return nil, ErrModuleDisabled
+}
+
var _ EthModuleAPI = &EthModuleDummy{}
var _ EthEventAPI = &EthModuleDummy{}
diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go
index 424756f81..a051b49b1 100644
--- a/node/impl/full/eth.go
+++ b/node/impl/full/eth.go
@@ -3,20 +3,16 @@ package full
import (
"bytes"
"context"
- "encoding/json"
"errors"
"fmt"
"os"
"sort"
"strconv"
"strings"
- "sync"
"time"
- "github.com/google/uuid"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
- "github.com/zyedidia/generic/queue"
"go.uber.org/fx"
"golang.org/x/xerrors"
@@ -24,10 +20,9 @@ import (
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
+ "github.com/filecoin-project/go-state-types/builtin"
builtintypes "github.com/filecoin-project/go-state-types/builtin"
- "github.com/filecoin-project/go-state-types/builtin/v10/eam"
"github.com/filecoin-project/go-state-types/builtin/v10/evm"
- "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/api"
@@ -42,7 +37,6 @@ import (
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes"
- "github.com/filecoin-project/lotus/chain/vm"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
@@ -77,6 +71,8 @@ type EthModuleAPI interface {
EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error)
EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error)
Web3ClientVersion(ctx context.Context) (string, error)
+ EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error)
+ EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error)
}
type EthEventAPI interface {
@@ -241,101 +237,8 @@ func (a *EthModule) EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthH
return newEthBlockFromFilecoinTipSet(ctx, ts, fullTxInfo, a.Chain, a.StateAPI)
}
-func (a *EthModule) getTipsetByEthBlockNumberOrHash(ctx context.Context, blkParam ethtypes.EthBlockNumberOrHash) (*types.TipSet, error) {
- head := a.Chain.GetHeaviestTipSet()
-
- predefined := blkParam.PredefinedBlock
- if predefined != nil {
- if *predefined == "earliest" {
- return nil, fmt.Errorf("block param \"earliest\" is not supported")
- } else if *predefined == "pending" {
- return head, nil
- } else if *predefined == "latest" {
- parent, err := a.Chain.GetTipSetFromKey(ctx, head.Parents())
- if err != nil {
- return nil, fmt.Errorf("cannot get parent tipset")
- }
- return parent, nil
- } else {
- return nil, fmt.Errorf("unknown predefined block %s", *predefined)
- }
- }
-
- if blkParam.BlockNumber != nil {
- height := abi.ChainEpoch(*blkParam.BlockNumber)
- if height > head.Height()-1 {
- return nil, fmt.Errorf("requested a future epoch (beyond 'latest')")
- }
- ts, err := a.ChainAPI.ChainGetTipSetByHeight(ctx, height, head.Key())
- if err != nil {
- return nil, fmt.Errorf("cannot get tipset at height: %v", height)
- }
- return ts, nil
- }
-
- if blkParam.BlockHash != nil {
- ts, err := a.Chain.GetTipSetByCid(ctx, blkParam.BlockHash.ToCid())
- if err != nil {
- return nil, fmt.Errorf("cannot get tipset by hash: %v", err)
- }
-
- // verify that the tipset is in the canonical chain
- if blkParam.RequireCanonical {
- // walk up the current chain (our head) until we reach ts.Height()
- walkTs, err := a.ChainAPI.ChainGetTipSetByHeight(ctx, ts.Height(), head.Key())
- if err != nil {
- return nil, fmt.Errorf("cannot get tipset at height: %v", ts.Height())
- }
-
- // verify that it equals the expected tipset
- if !walkTs.Equals(ts) {
- return nil, fmt.Errorf("tipset is not canonical")
- }
- }
-
- return ts, nil
- }
-
- return nil, errors.New("invalid block param")
-}
-
-func (a *EthModule) parseBlkParam(ctx context.Context, blkParam string, strict bool) (*types.TipSet, error) {
- if blkParam == "earliest" {
- return nil, fmt.Errorf("block param \"earliest\" is not supported")
- }
-
- head := a.Chain.GetHeaviestTipSet()
- switch blkParam {
- case "pending":
- return head, nil
- case "latest":
- parent, err := a.Chain.GetTipSetFromKey(ctx, head.Parents())
- if err != nil {
- return nil, fmt.Errorf("cannot get parent tipset")
- }
- return parent, nil
- default:
- var num ethtypes.EthUint64
- err := num.UnmarshalJSON([]byte(`"` + blkParam + `"`))
- if err != nil {
- return nil, fmt.Errorf("cannot parse block number: %v", err)
- }
- if abi.ChainEpoch(num) > head.Height()-1 {
- return nil, fmt.Errorf("requested a future epoch (beyond 'latest')")
- }
- ts, err := a.ChainAPI.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(num), head.Key())
- if err != nil {
- return nil, fmt.Errorf("cannot get tipset at height: %v", num)
- }
- if strict && ts.Height() != abi.ChainEpoch(num) {
- return nil, ErrNullRound
- }
- return ts, nil
- }
-}
-
func (a *EthModule) EthGetBlockByNumber(ctx context.Context, blkParam string, fullTxInfo bool) (ethtypes.EthBlock, error) {
- ts, err := a.parseBlkParam(ctx, blkParam, true)
+ ts, err := getTipsetByBlockNumber(ctx, a.Chain, blkParam, true)
if err != nil {
return ethtypes.EthBlock{}, err
}
@@ -431,7 +334,7 @@ func (a *EthModule) EthGetMessageCidByTransactionHash(ctx context.Context, txHas
}
func (a *EthModule) EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error) {
- hash, err := EthTxHashFromMessageCid(ctx, cid, a.StateAPI)
+ hash, err := ethTxHashFromMessageCid(ctx, cid, a.StateAPI)
if hash == ethtypes.EmptyEthHash {
// not found
return nil, nil
@@ -446,7 +349,7 @@ func (a *EthModule) EthGetTransactionCount(ctx context.Context, sender ethtypes.
return ethtypes.EthUint64(0), nil
}
- ts, err := a.getTipsetByEthBlockNumberOrHash(ctx, blkParam)
+ ts, err := getTipsetByEthBlockNumberOrHash(ctx, a.Chain, blkParam)
if err != nil {
return ethtypes.EthUint64(0), xerrors.Errorf("failed to process block param: %v; %w", blkParam, err)
}
@@ -534,7 +437,7 @@ func (a *EthModule) EthGetCode(ctx context.Context, ethAddr ethtypes.EthAddress,
return nil, xerrors.Errorf("cannot get Filecoin address: %w", err)
}
- ts, err := a.getTipsetByEthBlockNumberOrHash(ctx, blkParam)
+ ts, err := getTipsetByEthBlockNumberOrHash(ctx, a.Chain, blkParam)
if err != nil {
return nil, xerrors.Errorf("failed to process block param: %v; %w", blkParam, err)
}
@@ -613,7 +516,7 @@ func (a *EthModule) EthGetCode(ctx context.Context, ethAddr ethtypes.EthAddress,
}
func (a *EthModule) EthGetStorageAt(ctx context.Context, ethAddr ethtypes.EthAddress, position ethtypes.EthBytes, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
- ts, err := a.getTipsetByEthBlockNumberOrHash(ctx, blkParam)
+ ts, err := getTipsetByEthBlockNumberOrHash(ctx, a.Chain, blkParam)
if err != nil {
return nil, xerrors.Errorf("failed to process block param: %v; %w", blkParam, err)
}
@@ -709,7 +612,7 @@ func (a *EthModule) EthGetBalance(ctx context.Context, address ethtypes.EthAddre
return ethtypes.EthBigInt{}, err
}
- ts, err := a.getTipsetByEthBlockNumberOrHash(ctx, blkParam)
+ ts, err := getTipsetByEthBlockNumberOrHash(ctx, a.Chain, blkParam)
if err != nil {
return ethtypes.EthBigInt{}, xerrors.Errorf("failed to process block param: %v; %w", blkParam, err)
}
@@ -790,7 +693,7 @@ func (a *EthModule) EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (eth
}
}
- ts, err := a.parseBlkParam(ctx, params.NewestBlkNum, false)
+ ts, err := getTipsetByBlockNumber(ctx, a.Chain, params.NewestBlkNum, false)
if err != nil {
return ethtypes.EthFeeHistory{}, fmt.Errorf("bad block parameter %s: %s", params.NewestBlkNum, err)
}
@@ -922,62 +825,145 @@ func (a *EthModule) Web3ClientVersion(ctx context.Context) (string, error) {
return build.UserVersion(), nil
}
-func (a *EthModule) ethCallToFilecoinMessage(ctx context.Context, tx ethtypes.EthCall) (*types.Message, error) {
- var from address.Address
- if tx.From == nil || *tx.From == (ethtypes.EthAddress{}) {
- // Send from the filecoin "system" address.
- var err error
- from, err = (ethtypes.EthAddress{}).ToFilecoinAddress()
- if err != nil {
- return nil, fmt.Errorf("failed to construct the ethereum system address: %w", err)
- }
- } else {
- // The from address must be translatable to an f4 address.
- var err error
- from, err = tx.From.ToFilecoinAddress()
- if err != nil {
- return nil, fmt.Errorf("failed to translate sender address (%s): %w", tx.From.String(), err)
- }
- if p := from.Protocol(); p != address.Delegated {
- return nil, fmt.Errorf("expected a class 4 address, got: %d: %w", p, err)
- }
+func (a *EthModule) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) {
+ ts, err := getTipsetByBlockNumber(ctx, a.Chain, blkNum, false)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get tipset: %w", err)
}
- var params []byte
- if len(tx.Data) > 0 {
- initcode := abi.CborBytes(tx.Data)
- params2, err := actors.SerializeParams(&initcode)
- if err != nil {
- return nil, fmt.Errorf("failed to serialize params: %w", err)
- }
- params = params2
+ _, trace, err := a.StateManager.ExecutionTrace(ctx, ts)
+ if err != nil {
+ return nil, xerrors.Errorf("failed when calling ExecutionTrace: %w", err)
}
- var to address.Address
- var method abi.MethodNum
- if tx.To == nil {
- // this is a contract creation
- to = builtintypes.EthereumAddressManagerActorAddr
- method = builtintypes.MethodsEAM.CreateExternal
- } else {
- addr, err := tx.To.ToFilecoinAddress()
- if err != nil {
- return nil, xerrors.Errorf("cannot get Filecoin address: %w", err)
- }
- to = addr
- method = builtintypes.MethodsEVM.InvokeContract
+ tsParent, err := a.ChainAPI.ChainGetTipSetByHeight(ctx, ts.Height()+1, a.Chain.GetHeaviestTipSet().Key())
+ if err != nil {
+ return nil, xerrors.Errorf("cannot get tipset at height: %v", ts.Height()+1)
}
- return &types.Message{
- From: from,
- To: to,
- Value: big.Int(tx.Value),
- Method: method,
- Params: params,
- GasLimit: build.BlockGasLimit,
- GasFeeCap: big.Zero(),
- GasPremium: big.Zero(),
- }, nil
+ msgs, err := a.ChainGetParentMessages(ctx, tsParent.Blocks()[0].Cid())
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get parent messages: %w", err)
+ }
+
+ cid, err := ts.Key().Cid()
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get tipset key cid: %w", err)
+ }
+
+ blkHash, err := ethtypes.EthHashFromCid(cid)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to parse eth hash from cid: %w", err)
+ }
+
+ allTraces := make([]*ethtypes.EthTraceBlock, 0, len(trace))
+ msgIdx := 0
+ for _, ir := range trace {
+ // ignore messages from system actor
+ if ir.Msg.From == builtinactors.SystemActorAddr {
+ continue
+ }
+
+ // as we include TransactionPosition in the results, lets do sanity checking that the
+ // traces are indeed in the message execution order
+ if ir.Msg.Cid() != msgs[msgIdx].Message.Cid() {
+ return nil, xerrors.Errorf("traces are not in message execution order")
+ }
+ msgIdx++
+
+ txHash, err := a.EthGetTransactionHashByCid(ctx, ir.MsgCid)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get transaction hash by cid: %w", err)
+ }
+ if txHash == nil {
+ log.Warnf("cannot find transaction hash for cid %s", ir.MsgCid)
+ continue
+ }
+
+ traces := []*ethtypes.EthTrace{}
+ err = buildTraces(ctx, &traces, nil, []int{}, ir.ExecutionTrace, int64(ts.Height()), a.StateAPI)
+ if err != nil {
+ return nil, xerrors.Errorf("failed building traces: %w", err)
+ }
+
+ traceBlocks := make([]*ethtypes.EthTraceBlock, 0, len(traces))
+ for _, trace := range traces {
+ traceBlocks = append(traceBlocks, &ethtypes.EthTraceBlock{
+ EthTrace: trace,
+ BlockHash: blkHash,
+ BlockNumber: int64(ts.Height()),
+ TransactionHash: *txHash,
+ TransactionPosition: msgIdx,
+ })
+ }
+
+ allTraces = append(allTraces, traceBlocks...)
+ }
+
+ return allTraces, nil
+}
+
+func (a *EthModule) EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
+ if len(traceTypes) != 1 || traceTypes[0] != "trace" {
+ return nil, fmt.Errorf("only 'trace' is supported")
+ }
+
+ ts, err := getTipsetByBlockNumber(ctx, a.Chain, blkNum, false)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get tipset: %w", err)
+ }
+
+ _, trace, err := a.StateManager.ExecutionTrace(ctx, ts)
+ if err != nil {
+ return nil, xerrors.Errorf("failed when calling ExecutionTrace: %w", err)
+ }
+
+ allTraces := make([]*ethtypes.EthTraceReplayBlockTransaction, 0, len(trace))
+ for _, ir := range trace {
+ // ignore messages from system actor
+ if ir.Msg.From == builtinactors.SystemActorAddr {
+ continue
+ }
+
+ txHash, err := a.EthGetTransactionHashByCid(ctx, ir.MsgCid)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get transaction hash by cid: %w", err)
+ }
+ if txHash == nil {
+ log.Warnf("cannot find transaction hash for cid %s", ir.MsgCid)
+ continue
+ }
+
+ var output ethtypes.EthBytes
+ invokeCreateOnEAM := ir.Msg.To == builtin.EthereumAddressManagerActorAddr && (ir.Msg.Method == builtin.MethodsEAM.Create || ir.Msg.Method == builtin.MethodsEAM.Create2)
+ if ir.Msg.Method == builtin.MethodsEVM.InvokeContract || invokeCreateOnEAM {
+ output, err = decodePayload(ir.ExecutionTrace.MsgRct.Return, ir.ExecutionTrace.MsgRct.ReturnCodec)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to decode payload: %w", err)
+ }
+ } else {
+ output, err = handleFilecoinMethodOutput(ir.ExecutionTrace.MsgRct.ExitCode, ir.ExecutionTrace.MsgRct.ReturnCodec, ir.ExecutionTrace.MsgRct.Return)
+ if err != nil {
+ return nil, xerrors.Errorf("could not convert output: %w", err)
+ }
+ }
+
+ t := ethtypes.EthTraceReplayBlockTransaction{
+ Output: output,
+ TransactionHash: *txHash,
+ StateDiff: nil,
+ VmTrace: nil,
+ }
+
+ err = buildTraces(ctx, &t.Trace, nil, []int{}, ir.ExecutionTrace, int64(ts.Height()), a.StateAPI)
+ if err != nil {
+ return nil, xerrors.Errorf("failed building traces: %w", err)
+ }
+
+ allTraces = append(allTraces, &t)
+ }
+
+ return allTraces, nil
}
func (a *EthModule) applyMessage(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (res *api.InvocResult, err error) {
@@ -1013,7 +999,7 @@ func (a *EthModule) applyMessage(ctx context.Context, msg *types.Message, tsk ty
}
func (a *EthModule) EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error) {
- msg, err := a.ethCallToFilecoinMessage(ctx, tx)
+ msg, err := ethCallToFilecoinMessage(ctx, tx)
if err != nil {
return ethtypes.EthUint64(0), err
}
@@ -1171,12 +1157,12 @@ func ethGasSearch(
}
func (a *EthModule) EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) {
- msg, err := a.ethCallToFilecoinMessage(ctx, tx)
+ msg, err := ethCallToFilecoinMessage(ctx, tx)
if err != nil {
return nil, xerrors.Errorf("failed to convert ethcall to filecoin message: %w", err)
}
- ts, err := a.getTipsetByEthBlockNumberOrHash(ctx, blkParam)
+ ts, err := getTipsetByEthBlockNumberOrHash(ctx, a.Chain, blkParam)
if err != nil {
return nil, xerrors.Errorf("failed to process block param: %v; %w", blkParam, err)
}
@@ -1577,977 +1563,6 @@ func (e *EthEvent) GC(ctx context.Context, ttl time.Duration) {
}
}
-type filterEventCollector interface {
- TakeCollectedEvents(context.Context) []*filter.CollectedEvent
-}
-
-type filterMessageCollector interface {
- TakeCollectedMessages(context.Context) []*types.SignedMessage
-}
-
-type filterTipSetCollector interface {
- TakeCollectedTipSets(context.Context) []types.TipSetKey
-}
-
-func ethLogFromEvent(entries []types.EventEntry) (data []byte, topics []ethtypes.EthHash, ok bool) {
- var (
- topicsFound [4]bool
- topicsFoundCount int
- dataFound bool
- )
- // Topics must be non-nil, even if empty. So we might as well pre-allocate for 4 (the max).
- topics = make([]ethtypes.EthHash, 0, 4)
- for _, entry := range entries {
- // Drop events with non-raw topics to avoid mistakes.
- if entry.Codec != cid.Raw {
- log.Warnw("did not expect an event entry with a non-raw codec", "codec", entry.Codec, "key", entry.Key)
- return nil, nil, false
- }
- // Check if the key is t1..t4
- if len(entry.Key) == 2 && "t1" <= entry.Key && entry.Key <= "t4" {
- // '1' - '1' == 0, etc.
- idx := int(entry.Key[1] - '1')
-
- // Drop events with mis-sized topics.
- if len(entry.Value) != 32 {
- log.Warnw("got an EVM event topic with an invalid size", "key", entry.Key, "size", len(entry.Value))
- return nil, nil, false
- }
-
- // Drop events with duplicate topics.
- if topicsFound[idx] {
- log.Warnw("got a duplicate EVM event topic", "key", entry.Key)
- return nil, nil, false
- }
- topicsFound[idx] = true
- topicsFoundCount++
-
- // Extend the topics array
- for len(topics) <= idx {
- topics = append(topics, ethtypes.EthHash{})
- }
- copy(topics[idx][:], entry.Value)
- } else if entry.Key == "d" {
- // Drop events with duplicate data fields.
- if dataFound {
- log.Warnw("got duplicate EVM event data")
- return nil, nil, false
- }
-
- dataFound = true
- data = entry.Value
- } else {
- // Skip entries we don't understand (makes it easier to extend things).
- // But we warn for now because we don't expect them.
- log.Warnw("unexpected event entry", "key", entry.Key)
- }
-
- }
-
- // Drop events with skipped topics.
- if len(topics) != topicsFoundCount {
- log.Warnw("EVM event topic length mismatch", "expected", len(topics), "actual", topicsFoundCount)
- return nil, nil, false
- }
- return data, topics, true
-}
-
-func ethFilterResultFromEvents(evs []*filter.CollectedEvent, sa StateAPI) (*ethtypes.EthFilterResult, error) {
- res := &ethtypes.EthFilterResult{}
- for _, ev := range evs {
- log := ethtypes.EthLog{
- Removed: ev.Reverted,
- LogIndex: ethtypes.EthUint64(ev.EventIdx),
- TransactionIndex: ethtypes.EthUint64(ev.MsgIdx),
- BlockNumber: ethtypes.EthUint64(ev.Height),
- }
- var (
- err error
- ok bool
- )
-
- log.Data, log.Topics, ok = ethLogFromEvent(ev.Entries)
- if !ok {
- continue
- }
-
- log.Address, err = ethtypes.EthAddressFromFilecoinAddress(ev.EmitterAddr)
- if err != nil {
- return nil, err
- }
-
- log.TransactionHash, err = EthTxHashFromMessageCid(context.TODO(), ev.MsgCid, sa)
- if err != nil {
- return nil, err
- }
- c, err := ev.TipSetKey.Cid()
- if err != nil {
- return nil, err
- }
- log.BlockHash, err = ethtypes.EthHashFromCid(c)
- if err != nil {
- return nil, err
- }
-
- res.Results = append(res.Results, log)
- }
-
- return res, nil
-}
-
-func ethFilterResultFromTipSets(tsks []types.TipSetKey) (*ethtypes.EthFilterResult, error) {
- res := &ethtypes.EthFilterResult{}
-
- for _, tsk := range tsks {
- c, err := tsk.Cid()
- if err != nil {
- return nil, err
- }
- hash, err := ethtypes.EthHashFromCid(c)
- if err != nil {
- return nil, err
- }
-
- res.Results = append(res.Results, hash)
- }
-
- return res, nil
-}
-
-func ethFilterResultFromMessages(cs []*types.SignedMessage, sa StateAPI) (*ethtypes.EthFilterResult, error) {
- res := &ethtypes.EthFilterResult{}
-
- for _, c := range cs {
- hash, err := EthTxHashFromSignedMessage(context.TODO(), c, sa)
- if err != nil {
- return nil, err
- }
-
- res.Results = append(res.Results, hash)
- }
-
- return res, nil
-}
-
-type EthSubscriptionManager struct {
- Chain *store.ChainStore
- StateAPI StateAPI
- ChainAPI ChainAPI
- mu sync.Mutex
- subs map[ethtypes.EthSubscriptionID]*ethSubscription
-}
-
-func (e *EthSubscriptionManager) StartSubscription(ctx context.Context, out ethSubscriptionCallback, dropFilter func(context.Context, filter.Filter) error) (*ethSubscription, error) { // nolint
- rawid, err := uuid.NewRandom()
- if err != nil {
- return nil, xerrors.Errorf("new uuid: %w", err)
- }
- id := ethtypes.EthSubscriptionID{}
- copy(id[:], rawid[:]) // uuid is 16 bytes
-
- ctx, quit := context.WithCancel(ctx)
-
- sub := &ethSubscription{
- Chain: e.Chain,
- StateAPI: e.StateAPI,
- ChainAPI: e.ChainAPI,
- uninstallFilter: dropFilter,
- id: id,
- in: make(chan interface{}, 200),
- out: out,
- quit: quit,
-
- toSend: queue.New[[]byte](),
- sendCond: make(chan struct{}, 1),
- }
-
- e.mu.Lock()
- if e.subs == nil {
- e.subs = make(map[ethtypes.EthSubscriptionID]*ethSubscription)
- }
- e.subs[sub.id] = sub
- e.mu.Unlock()
-
- go sub.start(ctx)
- go sub.startOut(ctx)
-
- return sub, nil
-}
-
-func (e *EthSubscriptionManager) StopSubscription(ctx context.Context, id ethtypes.EthSubscriptionID) error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- sub, ok := e.subs[id]
- if !ok {
- return xerrors.Errorf("subscription not found")
- }
- sub.stop()
- delete(e.subs, id)
-
- return nil
-}
-
-type ethSubscriptionCallback func(context.Context, jsonrpc.RawParams) error
-
-const maxSendQueue = 20000
-
-type ethSubscription struct {
- Chain *store.ChainStore
- StateAPI StateAPI
- ChainAPI ChainAPI
- uninstallFilter func(context.Context, filter.Filter) error
- id ethtypes.EthSubscriptionID
- in chan interface{}
- out ethSubscriptionCallback
-
- mu sync.Mutex
- filters []filter.Filter
- quit func()
-
- sendLk sync.Mutex
- sendQueueLen int
- toSend *queue.Queue[[]byte]
- sendCond chan struct{}
-}
-
-func (e *ethSubscription) addFilter(ctx context.Context, f filter.Filter) {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- f.SetSubChannel(e.in)
- e.filters = append(e.filters, f)
-}
-
-// sendOut processes the final subscription queue. It's here in case the subscriber
-// is slow, and we need to buffer the messages.
-func (e *ethSubscription) startOut(ctx context.Context) {
- for {
- select {
- case <-ctx.Done():
- return
- case <-e.sendCond:
- e.sendLk.Lock()
-
- for !e.toSend.Empty() {
- front := e.toSend.Dequeue()
- e.sendQueueLen--
-
- e.sendLk.Unlock()
-
- if err := e.out(ctx, front); err != nil {
- log.Warnw("error sending subscription response, killing subscription", "sub", e.id, "error", err)
- e.stop()
- return
- }
-
- e.sendLk.Lock()
- }
-
- e.sendLk.Unlock()
- }
- }
-}
-
-func (e *ethSubscription) send(ctx context.Context, v interface{}) {
- resp := ethtypes.EthSubscriptionResponse{
- SubscriptionID: e.id,
- Result: v,
- }
-
- outParam, err := json.Marshal(resp)
- if err != nil {
- log.Warnw("marshaling subscription response", "sub", e.id, "error", err)
- return
- }
-
- e.sendLk.Lock()
- defer e.sendLk.Unlock()
-
- e.toSend.Enqueue(outParam)
-
- e.sendQueueLen++
- if e.sendQueueLen > maxSendQueue {
- log.Warnw("subscription send queue full, killing subscription", "sub", e.id)
- e.stop()
- return
- }
-
- select {
- case e.sendCond <- struct{}{}:
- default: // already signalled, and we're holding the lock so we know that the event will be processed
- }
-}
-
-func (e *ethSubscription) start(ctx context.Context) {
- for {
- select {
- case <-ctx.Done():
- return
- case v := <-e.in:
- switch vt := v.(type) {
- case *filter.CollectedEvent:
- evs, err := ethFilterResultFromEvents([]*filter.CollectedEvent{vt}, e.StateAPI)
- if err != nil {
- continue
- }
-
- for _, r := range evs.Results {
- e.send(ctx, r)
- }
- case *types.TipSet:
- ev, err := newEthBlockFromFilecoinTipSet(ctx, vt, true, e.Chain, e.StateAPI)
- if err != nil {
- break
- }
-
- e.send(ctx, ev)
- case *types.SignedMessage: // mpool txid
- evs, err := ethFilterResultFromMessages([]*types.SignedMessage{vt}, e.StateAPI)
- if err != nil {
- continue
- }
-
- for _, r := range evs.Results {
- e.send(ctx, r)
- }
- default:
- log.Warnf("unexpected subscription value type: %T", vt)
- }
- }
- }
-}
-
-func (e *ethSubscription) stop() {
- e.mu.Lock()
- if e.quit == nil {
- e.mu.Unlock()
- return
- }
-
- if e.quit != nil {
- e.quit()
- e.quit = nil
- e.mu.Unlock()
-
- for _, f := range e.filters {
- // note: the context in actually unused in uninstallFilter
- if err := e.uninstallFilter(context.TODO(), f); err != nil {
- // this will leave the filter a zombie, collecting events up to the maximum allowed
- log.Warnf("failed to remove filter when unsubscribing: %v", err)
- }
- }
- }
-}
-
-func newEthBlockFromFilecoinTipSet(ctx context.Context, ts *types.TipSet, fullTxInfo bool, cs *store.ChainStore, sa StateAPI) (ethtypes.EthBlock, error) {
- parentKeyCid, err := ts.Parents().Cid()
- if err != nil {
- return ethtypes.EthBlock{}, err
- }
- parentBlkHash, err := ethtypes.EthHashFromCid(parentKeyCid)
- if err != nil {
- return ethtypes.EthBlock{}, err
- }
-
- bn := ethtypes.EthUint64(ts.Height())
-
- blkCid, err := ts.Key().Cid()
- if err != nil {
- return ethtypes.EthBlock{}, err
- }
- blkHash, err := ethtypes.EthHashFromCid(blkCid)
- if err != nil {
- return ethtypes.EthBlock{}, err
- }
-
- msgs, rcpts, err := messagesAndReceipts(ctx, ts, cs, sa)
- if err != nil {
- return ethtypes.EthBlock{}, xerrors.Errorf("failed to retrieve messages and receipts: %w", err)
- }
-
- block := ethtypes.NewEthBlock(len(msgs) > 0)
-
- gasUsed := int64(0)
- for i, msg := range msgs {
- rcpt := rcpts[i]
- ti := ethtypes.EthUint64(i)
- gasUsed += rcpt.GasUsed
- var smsg *types.SignedMessage
- switch msg := msg.(type) {
- case *types.SignedMessage:
- smsg = msg
- case *types.Message:
- smsg = &types.SignedMessage{
- Message: *msg,
- Signature: crypto.Signature{
- Type: crypto.SigTypeBLS,
- },
- }
- default:
- return ethtypes.EthBlock{}, xerrors.Errorf("failed to get signed msg %s: %w", msg.Cid(), err)
- }
- tx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
- if err != nil {
- return ethtypes.EthBlock{}, xerrors.Errorf("failed to convert msg to ethTx: %w", err)
- }
-
- tx.ChainID = ethtypes.EthUint64(build.Eip155ChainId)
- tx.BlockHash = &blkHash
- tx.BlockNumber = &bn
- tx.TransactionIndex = &ti
-
- if fullTxInfo {
- block.Transactions = append(block.Transactions, tx)
- } else {
- block.Transactions = append(block.Transactions, tx.Hash.String())
- }
- }
-
- block.Hash = blkHash
- block.Number = bn
- block.ParentHash = parentBlkHash
- block.Timestamp = ethtypes.EthUint64(ts.Blocks()[0].Timestamp)
- block.BaseFeePerGas = ethtypes.EthBigInt{Int: ts.Blocks()[0].ParentBaseFee.Int}
- block.GasUsed = ethtypes.EthUint64(gasUsed)
- return block, nil
-}
-
-func messagesAndReceipts(ctx context.Context, ts *types.TipSet, cs *store.ChainStore, sa StateAPI) ([]types.ChainMsg, []types.MessageReceipt, error) {
- msgs, err := cs.MessagesForTipset(ctx, ts)
- if err != nil {
- return nil, nil, xerrors.Errorf("error loading messages for tipset: %v: %w", ts, err)
- }
-
- _, rcptRoot, err := sa.StateManager.TipSetState(ctx, ts)
- if err != nil {
- return nil, nil, xerrors.Errorf("failed to compute state: %w", err)
- }
-
- rcpts, err := cs.ReadReceipts(ctx, rcptRoot)
- if err != nil {
- return nil, nil, xerrors.Errorf("error loading receipts for tipset: %v: %w", ts, err)
- }
-
- if len(msgs) != len(rcpts) {
- return nil, nil, xerrors.Errorf("receipts and message array lengths didn't match for tipset: %v: %w", ts, err)
- }
-
- return msgs, rcpts, nil
-}
-
-// lookupEthAddress makes its best effort at finding the Ethereum address for a
-// Filecoin address. It does the following:
-//
-// 1. If the supplied address is an f410 address, we return its payload as the EthAddress.
-// 2. Otherwise (f0, f1, f2, f3), we look up the actor on the state tree. If it has a delegated address, we return it if it's f410 address.
-// 3. Otherwise, we fall back to returning a masked ID Ethereum address. If the supplied address is an f0 address, we
-// use that ID to form the masked ID address.
-// 4. Otherwise, we fetch the actor's ID from the state tree and form the masked ID with it.
-func lookupEthAddress(ctx context.Context, addr address.Address, sa StateAPI) (ethtypes.EthAddress, error) {
- // BLOCK A: We are trying to get an actual Ethereum address from an f410 address.
- // Attempt to convert directly, if it's an f4 address.
- ethAddr, err := ethtypes.EthAddressFromFilecoinAddress(addr)
- if err == nil && !ethAddr.IsMaskedID() {
- return ethAddr, nil
- }
-
- // Lookup on the target actor and try to get an f410 address.
- if actor, err := sa.StateGetActor(ctx, addr, types.EmptyTSK); err != nil {
- return ethtypes.EthAddress{}, err
- } else if actor.Address != nil {
- if ethAddr, err := ethtypes.EthAddressFromFilecoinAddress(*actor.Address); err == nil && !ethAddr.IsMaskedID() {
- return ethAddr, nil
- }
- }
-
- // BLOCK B: We gave up on getting an actual Ethereum address and are falling back to a Masked ID address.
- // Check if we already have an ID addr, and use it if possible.
- if err == nil && ethAddr.IsMaskedID() {
- return ethAddr, nil
- }
-
- // Otherwise, resolve the ID addr.
- idAddr, err := sa.StateLookupID(ctx, addr, types.EmptyTSK)
- if err != nil {
- return ethtypes.EthAddress{}, err
- }
- return ethtypes.EthAddressFromFilecoinAddress(idAddr)
-}
-
-func EthTxHashFromMessageCid(ctx context.Context, c cid.Cid, sa StateAPI) (ethtypes.EthHash, error) {
- smsg, err := sa.Chain.GetSignedMessage(ctx, c)
- if err == nil {
- // This is an Eth Tx, Secp message, Or BLS message in the mpool
- return EthTxHashFromSignedMessage(ctx, smsg, sa)
- }
-
- _, err = sa.Chain.GetMessage(ctx, c)
- if err == nil {
- // This is a BLS message
- return ethtypes.EthHashFromCid(c)
- }
-
- return ethtypes.EmptyEthHash, nil
-}
-
-func EthTxHashFromSignedMessage(ctx context.Context, smsg *types.SignedMessage, sa StateAPI) (ethtypes.EthHash, error) {
- if smsg.Signature.Type == crypto.SigTypeDelegated {
- ethTx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
- if err != nil {
- return ethtypes.EmptyEthHash, err
- }
- return ethTx.Hash, nil
- } else if smsg.Signature.Type == crypto.SigTypeSecp256k1 {
- return ethtypes.EthHashFromCid(smsg.Cid())
- } else { // BLS message
- return ethtypes.EthHashFromCid(smsg.Message.Cid())
- }
-}
-
-func newEthTxFromSignedMessage(ctx context.Context, smsg *types.SignedMessage, sa StateAPI) (ethtypes.EthTx, error) {
- var tx ethtypes.EthTx
- var err error
-
- // This is an eth tx
- if smsg.Signature.Type == crypto.SigTypeDelegated {
- tx, err = ethtypes.EthTxFromSignedEthMessage(smsg)
- if err != nil {
- return ethtypes.EthTx{}, xerrors.Errorf("failed to convert from signed message: %w", err)
- }
-
- tx.Hash, err = tx.TxHash()
- if err != nil {
- return ethtypes.EthTx{}, xerrors.Errorf("failed to calculate hash for ethTx: %w", err)
- }
-
- fromAddr, err := lookupEthAddress(ctx, smsg.Message.From, sa)
- if err != nil {
- return ethtypes.EthTx{}, xerrors.Errorf("failed to resolve Ethereum address: %w", err)
- }
-
- tx.From = fromAddr
- } else if smsg.Signature.Type == crypto.SigTypeSecp256k1 { // Secp Filecoin Message
- tx = ethTxFromNativeMessage(ctx, smsg.VMMessage(), sa)
- tx.Hash, err = ethtypes.EthHashFromCid(smsg.Cid())
- if err != nil {
- return tx, err
- }
- } else { // BLS Filecoin message
- tx = ethTxFromNativeMessage(ctx, smsg.VMMessage(), sa)
- tx.Hash, err = ethtypes.EthHashFromCid(smsg.Message.Cid())
- if err != nil {
- return tx, err
- }
- }
-
- return tx, nil
-}
-
-// ethTxFromNativeMessage does NOT populate:
-// - BlockHash
-// - BlockNumber
-// - TransactionIndex
-// - Hash
-func ethTxFromNativeMessage(ctx context.Context, msg *types.Message, sa StateAPI) ethtypes.EthTx {
- // We don't care if we error here, conversion is best effort for non-eth transactions
- from, _ := lookupEthAddress(ctx, msg.From, sa)
- to, _ := lookupEthAddress(ctx, msg.To, sa)
- return ethtypes.EthTx{
- To: &to,
- From: from,
- Nonce: ethtypes.EthUint64(msg.Nonce),
- ChainID: ethtypes.EthUint64(build.Eip155ChainId),
- Value: ethtypes.EthBigInt(msg.Value),
- Type: ethtypes.Eip1559TxType,
- Gas: ethtypes.EthUint64(msg.GasLimit),
- MaxFeePerGas: ethtypes.EthBigInt(msg.GasFeeCap),
- MaxPriorityFeePerGas: ethtypes.EthBigInt(msg.GasPremium),
- AccessList: []ethtypes.EthHash{},
- }
-}
-
-// newEthTxFromMessageLookup creates an ethereum transaction from filecoin message lookup. If a negative txIdx is passed
-// into the function, it looks up the transaction index of the message in the tipset, otherwise it uses the txIdx passed into the
-// function
-func newEthTxFromMessageLookup(ctx context.Context, msgLookup *api.MsgLookup, txIdx int, cs *store.ChainStore, sa StateAPI) (ethtypes.EthTx, error) {
- ts, err := cs.LoadTipSet(ctx, msgLookup.TipSet)
- if err != nil {
- return ethtypes.EthTx{}, err
- }
-
- // This tx is located in the parent tipset
- parentTs, err := cs.LoadTipSet(ctx, ts.Parents())
- if err != nil {
- return ethtypes.EthTx{}, err
- }
-
- parentTsCid, err := parentTs.Key().Cid()
- if err != nil {
- return ethtypes.EthTx{}, err
- }
-
- // lookup the transactionIndex
- if txIdx < 0 {
- msgs, err := cs.MessagesForTipset(ctx, parentTs)
- if err != nil {
- return ethtypes.EthTx{}, err
- }
- for i, msg := range msgs {
- if msg.Cid() == msgLookup.Message {
- txIdx = i
- break
- }
- }
- if txIdx < 0 {
- return ethtypes.EthTx{}, fmt.Errorf("cannot find the msg in the tipset")
- }
- }
-
- blkHash, err := ethtypes.EthHashFromCid(parentTsCid)
- if err != nil {
- return ethtypes.EthTx{}, err
- }
-
- smsg, err := getSignedMessage(ctx, cs, msgLookup.Message)
- if err != nil {
- return ethtypes.EthTx{}, xerrors.Errorf("failed to get signed msg: %w", err)
- }
-
- tx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
- if err != nil {
- return ethtypes.EthTx{}, err
- }
-
- var (
- bn = ethtypes.EthUint64(parentTs.Height())
- ti = ethtypes.EthUint64(txIdx)
- )
-
- tx.ChainID = ethtypes.EthUint64(build.Eip155ChainId)
- tx.BlockHash = &blkHash
- tx.BlockNumber = &bn
- tx.TransactionIndex = &ti
- return tx, nil
-}
-
-func newEthTxReceipt(ctx context.Context, tx ethtypes.EthTx, lookup *api.MsgLookup, events []types.Event, cs *store.ChainStore, sa StateAPI) (api.EthTxReceipt, error) {
- var (
- transactionIndex ethtypes.EthUint64
- blockHash ethtypes.EthHash
- blockNumber ethtypes.EthUint64
- )
-
- if tx.TransactionIndex != nil {
- transactionIndex = *tx.TransactionIndex
- }
- if tx.BlockHash != nil {
- blockHash = *tx.BlockHash
- }
- if tx.BlockNumber != nil {
- blockNumber = *tx.BlockNumber
- }
-
- receipt := api.EthTxReceipt{
- TransactionHash: tx.Hash,
- From: tx.From,
- To: tx.To,
- TransactionIndex: transactionIndex,
- BlockHash: blockHash,
- BlockNumber: blockNumber,
- Type: ethtypes.EthUint64(2),
- Logs: []ethtypes.EthLog{}, // empty log array is compulsory when no logs, or libraries like ethers.js break
- LogsBloom: ethtypes.EmptyEthBloom[:],
- }
-
- if lookup.Receipt.ExitCode.IsSuccess() {
- receipt.Status = 1
- } else {
- receipt.Status = 0
- }
-
- receipt.GasUsed = ethtypes.EthUint64(lookup.Receipt.GasUsed)
-
- // TODO: handle CumulativeGasUsed
- receipt.CumulativeGasUsed = ethtypes.EmptyEthInt
-
- // TODO: avoid loading the tipset twice (once here, once when we convert the message to a txn)
- ts, err := cs.GetTipSetFromKey(ctx, lookup.TipSet)
- if err != nil {
- return api.EthTxReceipt{}, xerrors.Errorf("failed to lookup tipset %s when constructing the eth txn receipt: %w", lookup.TipSet, err)
- }
-
- baseFee := ts.Blocks()[0].ParentBaseFee
- gasOutputs := vm.ComputeGasOutputs(lookup.Receipt.GasUsed, int64(tx.Gas), baseFee, big.Int(tx.MaxFeePerGas), big.Int(tx.MaxPriorityFeePerGas), true)
- totalSpent := big.Sum(gasOutputs.BaseFeeBurn, gasOutputs.MinerTip, gasOutputs.OverEstimationBurn)
-
- effectiveGasPrice := big.Zero()
- if lookup.Receipt.GasUsed > 0 {
- effectiveGasPrice = big.Div(totalSpent, big.NewInt(lookup.Receipt.GasUsed))
- }
- receipt.EffectiveGasPrice = ethtypes.EthBigInt(effectiveGasPrice)
-
- if receipt.To == nil && lookup.Receipt.ExitCode.IsSuccess() {
- // Create and Create2 return the same things.
- var ret eam.CreateExternalReturn
- if err := ret.UnmarshalCBOR(bytes.NewReader(lookup.Receipt.Return)); err != nil {
- return api.EthTxReceipt{}, xerrors.Errorf("failed to parse contract creation result: %w", err)
- }
- addr := ethtypes.EthAddress(ret.EthAddress)
- receipt.ContractAddress = &addr
- }
-
- if len(events) > 0 {
- receipt.Logs = make([]ethtypes.EthLog, 0, len(events))
- for i, evt := range events {
- l := ethtypes.EthLog{
- Removed: false,
- LogIndex: ethtypes.EthUint64(i),
- TransactionHash: tx.Hash,
- TransactionIndex: transactionIndex,
- BlockHash: blockHash,
- BlockNumber: blockNumber,
- }
-
- data, topics, ok := ethLogFromEvent(evt.Entries)
- if !ok {
- // not an eth event.
- continue
- }
- for _, topic := range topics {
- ethtypes.EthBloomSet(receipt.LogsBloom, topic[:])
- }
- l.Data = data
- l.Topics = topics
-
- addr, err := address.NewIDAddress(uint64(evt.Emitter))
- if err != nil {
- return api.EthTxReceipt{}, xerrors.Errorf("failed to create ID address: %w", err)
- }
-
- l.Address, err = lookupEthAddress(ctx, addr, sa)
- if err != nil {
- return api.EthTxReceipt{}, xerrors.Errorf("failed to resolve Ethereum address: %w", err)
- }
-
- ethtypes.EthBloomSet(receipt.LogsBloom, l.Address[:])
- receipt.Logs = append(receipt.Logs, l)
- }
- }
-
- return receipt, nil
-}
-
-func (m *EthTxHashManager) Apply(ctx context.Context, from, to *types.TipSet) error {
- for _, blk := range to.Blocks() {
- _, smsgs, err := m.StateAPI.Chain.MessagesForBlock(ctx, blk)
- if err != nil {
- return err
- }
-
- for _, smsg := range smsgs {
- if smsg.Signature.Type != crypto.SigTypeDelegated {
- continue
- }
-
- hash, err := EthTxHashFromSignedMessage(ctx, smsg, m.StateAPI)
- if err != nil {
- return err
- }
-
- err = m.TransactionHashLookup.UpsertHash(hash, smsg.Cid())
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-type EthTxHashManager struct {
- StateAPI StateAPI
- TransactionHashLookup *ethhashlookup.EthTxHashLookup
-}
-
-func (m *EthTxHashManager) Revert(ctx context.Context, from, to *types.TipSet) error {
- return nil
-}
-
-func (m *EthTxHashManager) PopulateExistingMappings(ctx context.Context, minHeight abi.ChainEpoch) error {
- if minHeight < build.UpgradeHyggeHeight {
- minHeight = build.UpgradeHyggeHeight
- }
-
- ts := m.StateAPI.Chain.GetHeaviestTipSet()
- for ts.Height() > minHeight {
- for _, block := range ts.Blocks() {
- msgs, err := m.StateAPI.Chain.SecpkMessagesForBlock(ctx, block)
- if err != nil {
- // If we can't find the messages, we've either imported from snapshot or pruned the store
- log.Debug("exiting message mapping population at epoch ", ts.Height())
- return nil
- }
-
- for _, msg := range msgs {
- m.ProcessSignedMessage(ctx, msg)
- }
- }
-
- var err error
- ts, err = m.StateAPI.Chain.GetTipSetFromKey(ctx, ts.Parents())
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (m *EthTxHashManager) ProcessSignedMessage(ctx context.Context, msg *types.SignedMessage) {
- if msg.Signature.Type != crypto.SigTypeDelegated {
- return
- }
-
- ethTx, err := newEthTxFromSignedMessage(ctx, msg, m.StateAPI)
- if err != nil {
- log.Errorf("error converting filecoin message to eth tx: %s", err)
- return
- }
-
- err = m.TransactionHashLookup.UpsertHash(ethTx.Hash, msg.Cid())
- if err != nil {
- log.Errorf("error inserting tx mapping to db: %s", err)
- return
- }
-}
-
-func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, manager *EthTxHashManager) {
- for {
- select {
- case <-ctx.Done():
- return
- case u := <-ch:
- if u.Type != api.MpoolAdd {
- continue
- }
-
- manager.ProcessSignedMessage(ctx, u.Message)
- }
- }
-}
-
-func EthTxHashGC(ctx context.Context, retentionDays int, manager *EthTxHashManager) {
- if retentionDays == 0 {
- return
- }
-
- gcPeriod := 1 * time.Hour
- for {
- entriesDeleted, err := manager.TransactionHashLookup.DeleteEntriesOlderThan(retentionDays)
- if err != nil {
- log.Errorf("error garbage collecting eth transaction hash database: %s", err)
- }
- log.Info("garbage collection run on eth transaction hash lookup database. %d entries deleted", entriesDeleted)
- time.Sleep(gcPeriod)
- }
-}
-
-func parseEthTopics(topics ethtypes.EthTopicSpec) (map[string][][]byte, error) {
- keys := map[string][][]byte{}
- for idx, vals := range topics {
- if len(vals) == 0 {
- continue
- }
- // Ethereum topics are emitted using `LOG{0..4}` opcodes resulting in topics1..4
- key := fmt.Sprintf("t%d", idx+1)
- for _, v := range vals {
- v := v // copy the ethhash to avoid repeatedly referencing the same one.
- keys[key] = append(keys[key], v[:])
- }
- }
- return keys, nil
-}
-
-const errorFunctionSelector = "\x08\xc3\x79\xa0" // Error(string)
-const panicFunctionSelector = "\x4e\x48\x7b\x71" // Panic(uint256)
-// Eth ABI (solidity) panic codes.
-var panicErrorCodes map[uint64]string = map[uint64]string{
- 0x00: "Panic()",
- 0x01: "Assert()",
- 0x11: "ArithmeticOverflow()",
- 0x12: "DivideByZero()",
- 0x21: "InvalidEnumVariant()",
- 0x22: "InvalidStorageArray()",
- 0x31: "PopEmptyArray()",
- 0x32: "ArrayIndexOutOfBounds()",
- 0x41: "OutOfMemory()",
- 0x51: "CalledUninitializedFunction()",
-}
-
-// Parse an ABI encoded revert reason. This reason should be encoded as if it were the parameters to
-// an `Error(string)` function call.
-//
-// See https://docs.soliditylang.org/en/latest/control-structures.html#panic-via-assert-and-error-via-require
-func parseEthRevert(ret []byte) string {
- if len(ret) == 0 {
- return "none"
- }
- var cbytes abi.CborBytes
- if err := cbytes.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
- return "ERROR: revert reason is not cbor encoded bytes"
- }
- if len(cbytes) == 0 {
- return "none"
- }
- // If it's not long enough to contain an ABI encoded response, return immediately.
- if len(cbytes) < 4+32 {
- return ethtypes.EthBytes(cbytes).String()
- }
- switch string(cbytes[:4]) {
- case panicFunctionSelector:
- cbytes := cbytes[4 : 4+32]
- // Read the and check the code.
- code, err := ethtypes.EthUint64FromBytes(cbytes)
- if err != nil {
- // If it's too big, just return the raw value.
- codeInt := big.PositiveFromUnsignedBytes(cbytes)
- return fmt.Sprintf("Panic(%s)", ethtypes.EthBigInt(codeInt).String())
- }
- if s, ok := panicErrorCodes[uint64(code)]; ok {
- return s
- }
- return fmt.Sprintf("Panic(0x%x)", code)
- case errorFunctionSelector:
- cbytes := cbytes[4:]
- cbytesLen := ethtypes.EthUint64(len(cbytes))
- // Read the and check the offset.
- offset, err := ethtypes.EthUint64FromBytes(cbytes[:32])
- if err != nil {
- break
- }
- if cbytesLen < offset {
- break
- }
-
- // Read and check the length.
- if cbytesLen-offset < 32 {
- break
- }
- start := offset + 32
- length, err := ethtypes.EthUint64FromBytes(cbytes[offset : offset+32])
- if err != nil {
- break
- }
- if cbytesLen-start < length {
- break
- }
- // Slice the error message.
- return fmt.Sprintf("Error(%s)", cbytes[start:start+length])
- }
- return ethtypes.EthBytes(cbytes).String()
-}
-
func calculateRewardsAndGasUsed(rewardPercentiles []float64, txGasRewards gasRewardSorter) ([]ethtypes.EthBigInt, int64) {
var gasUsedTotal int64
for _, tx := range txGasRewards {
@@ -2579,25 +1594,6 @@ func calculateRewardsAndGasUsed(rewardPercentiles []float64, txGasRewards gasRew
return rewards, gasUsedTotal
}
-func getSignedMessage(ctx context.Context, cs *store.ChainStore, msgCid cid.Cid) (*types.SignedMessage, error) {
- smsg, err := cs.GetSignedMessage(ctx, msgCid)
- if err != nil {
- // We couldn't find the signed message, it might be a BLS message, so search for a regular message.
- msg, err := cs.GetMessage(ctx, msgCid)
- if err != nil {
- return nil, xerrors.Errorf("failed to find msg %s: %w", msgCid, err)
- }
- smsg = &types.SignedMessage{
- Message: *msg,
- Signature: crypto.Signature{
- Type: crypto.SigTypeBLS,
- },
- }
- }
-
- return smsg, nil
-}
-
type gasRewardTuple struct {
gasUsed int64
premium abi.TokenAmount
diff --git a/node/impl/full/eth_event.go b/node/impl/full/eth_event.go
new file mode 100644
index 000000000..69021e08a
--- /dev/null
+++ b/node/impl/full/eth_event.go
@@ -0,0 +1,382 @@
+package full
+
+import (
+ "context"
+ "encoding/json"
+ "sync"
+
+ "github.com/google/uuid"
+ "github.com/ipfs/go-cid"
+ "github.com/zyedidia/generic/queue"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-jsonrpc"
+
+ "github.com/filecoin-project/lotus/chain/events/filter"
+ "github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/types/ethtypes"
+)
+
+type filterEventCollector interface {
+ TakeCollectedEvents(context.Context) []*filter.CollectedEvent
+}
+
+type filterMessageCollector interface {
+ TakeCollectedMessages(context.Context) []*types.SignedMessage
+}
+
+type filterTipSetCollector interface {
+ TakeCollectedTipSets(context.Context) []types.TipSetKey
+}
+
+func ethLogFromEvent(entries []types.EventEntry) (data []byte, topics []ethtypes.EthHash, ok bool) {
+ var (
+ topicsFound [4]bool
+ topicsFoundCount int
+ dataFound bool
+ )
+ // Topics must be non-nil, even if empty. So we might as well pre-allocate for 4 (the max).
+ topics = make([]ethtypes.EthHash, 0, 4)
+ for _, entry := range entries {
+ // Drop events with non-raw topics to avoid mistakes.
+ if entry.Codec != cid.Raw {
+ log.Warnw("did not expect an event entry with a non-raw codec", "codec", entry.Codec, "key", entry.Key)
+ return nil, nil, false
+ }
+ // Check if the key is t1..t4
+ if len(entry.Key) == 2 && "t1" <= entry.Key && entry.Key <= "t4" {
+ // '1' - '1' == 0, etc.
+ idx := int(entry.Key[1] - '1')
+
+ // Drop events with mis-sized topics.
+ if len(entry.Value) != 32 {
+ log.Warnw("got an EVM event topic with an invalid size", "key", entry.Key, "size", len(entry.Value))
+ return nil, nil, false
+ }
+
+ // Drop events with duplicate topics.
+ if topicsFound[idx] {
+ log.Warnw("got a duplicate EVM event topic", "key", entry.Key)
+ return nil, nil, false
+ }
+ topicsFound[idx] = true
+ topicsFoundCount++
+
+ // Extend the topics array
+ for len(topics) <= idx {
+ topics = append(topics, ethtypes.EthHash{})
+ }
+ copy(topics[idx][:], entry.Value)
+ } else if entry.Key == "d" {
+ // Drop events with duplicate data fields.
+ if dataFound {
+ log.Warnw("got duplicate EVM event data")
+ return nil, nil, false
+ }
+
+ dataFound = true
+ data = entry.Value
+ } else {
+ // Skip entries we don't understand (makes it easier to extend things).
+ // But we warn for now because we don't expect them.
+ log.Warnw("unexpected event entry", "key", entry.Key)
+ }
+
+ }
+
+ // Drop events with skipped topics.
+ if len(topics) != topicsFoundCount {
+ log.Warnw("EVM event topic length mismatch", "expected", len(topics), "actual", topicsFoundCount)
+ return nil, nil, false
+ }
+ return data, topics, true
+}
+
+func ethFilterResultFromEvents(evs []*filter.CollectedEvent, sa StateAPI) (*ethtypes.EthFilterResult, error) {
+ res := ðtypes.EthFilterResult{}
+ for _, ev := range evs {
+ log := ethtypes.EthLog{
+ Removed: ev.Reverted,
+ LogIndex: ethtypes.EthUint64(ev.EventIdx),
+ TransactionIndex: ethtypes.EthUint64(ev.MsgIdx),
+ BlockNumber: ethtypes.EthUint64(ev.Height),
+ }
+ var (
+ err error
+ ok bool
+ )
+
+ log.Data, log.Topics, ok = ethLogFromEvent(ev.Entries)
+ if !ok {
+ continue
+ }
+
+ log.Address, err = ethtypes.EthAddressFromFilecoinAddress(ev.EmitterAddr)
+ if err != nil {
+ return nil, err
+ }
+
+ log.TransactionHash, err = ethTxHashFromMessageCid(context.TODO(), ev.MsgCid, sa)
+ if err != nil {
+ return nil, err
+ }
+ c, err := ev.TipSetKey.Cid()
+ if err != nil {
+ return nil, err
+ }
+ log.BlockHash, err = ethtypes.EthHashFromCid(c)
+ if err != nil {
+ return nil, err
+ }
+
+ res.Results = append(res.Results, log)
+ }
+
+ return res, nil
+}
+
+func ethFilterResultFromTipSets(tsks []types.TipSetKey) (*ethtypes.EthFilterResult, error) {
+ res := ðtypes.EthFilterResult{}
+
+ for _, tsk := range tsks {
+ c, err := tsk.Cid()
+ if err != nil {
+ return nil, err
+ }
+ hash, err := ethtypes.EthHashFromCid(c)
+ if err != nil {
+ return nil, err
+ }
+
+ res.Results = append(res.Results, hash)
+ }
+
+ return res, nil
+}
+
+func ethFilterResultFromMessages(cs []*types.SignedMessage, sa StateAPI) (*ethtypes.EthFilterResult, error) {
+ res := ðtypes.EthFilterResult{}
+
+ for _, c := range cs {
+ hash, err := ethTxHashFromSignedMessage(context.TODO(), c, sa)
+ if err != nil {
+ return nil, err
+ }
+
+ res.Results = append(res.Results, hash)
+ }
+
+ return res, nil
+}
+
+type EthSubscriptionManager struct {
+ Chain *store.ChainStore
+ StateAPI StateAPI
+ ChainAPI ChainAPI
+ mu sync.Mutex
+ subs map[ethtypes.EthSubscriptionID]*ethSubscription
+}
+
+func (e *EthSubscriptionManager) StartSubscription(ctx context.Context, out ethSubscriptionCallback, dropFilter func(context.Context, filter.Filter) error) (*ethSubscription, error) { // nolint
+ rawid, err := uuid.NewRandom()
+ if err != nil {
+ return nil, xerrors.Errorf("new uuid: %w", err)
+ }
+ id := ethtypes.EthSubscriptionID{}
+ copy(id[:], rawid[:]) // uuid is 16 bytes
+
+ ctx, quit := context.WithCancel(ctx)
+
+ sub := ðSubscription{
+ Chain: e.Chain,
+ StateAPI: e.StateAPI,
+ ChainAPI: e.ChainAPI,
+ uninstallFilter: dropFilter,
+ id: id,
+ in: make(chan interface{}, 200),
+ out: out,
+ quit: quit,
+
+ toSend: queue.New[[]byte](),
+ sendCond: make(chan struct{}, 1),
+ }
+
+ e.mu.Lock()
+ if e.subs == nil {
+ e.subs = make(map[ethtypes.EthSubscriptionID]*ethSubscription)
+ }
+ e.subs[sub.id] = sub
+ e.mu.Unlock()
+
+ go sub.start(ctx)
+ go sub.startOut(ctx)
+
+ return sub, nil
+}
+
+func (e *EthSubscriptionManager) StopSubscription(ctx context.Context, id ethtypes.EthSubscriptionID) error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+
+ sub, ok := e.subs[id]
+ if !ok {
+ return xerrors.Errorf("subscription not found")
+ }
+ sub.stop()
+ delete(e.subs, id)
+
+ return nil
+}
+
+type ethSubscriptionCallback func(context.Context, jsonrpc.RawParams) error
+
+const maxSendQueue = 20000
+
+type ethSubscription struct {
+ Chain *store.ChainStore
+ StateAPI StateAPI
+ ChainAPI ChainAPI
+ uninstallFilter func(context.Context, filter.Filter) error
+ id ethtypes.EthSubscriptionID
+ in chan interface{}
+ out ethSubscriptionCallback
+
+ mu sync.Mutex
+ filters []filter.Filter
+ quit func()
+
+ sendLk sync.Mutex
+ sendQueueLen int
+ toSend *queue.Queue[[]byte]
+ sendCond chan struct{}
+}
+
+func (e *ethSubscription) addFilter(ctx context.Context, f filter.Filter) {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+
+ f.SetSubChannel(e.in)
+ e.filters = append(e.filters, f)
+}
+
+// startOut processes the final subscription queue. It's here in case the subscriber
+// is slow, and we need to buffer the messages.
+func (e *ethSubscription) startOut(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-e.sendCond:
+ e.sendLk.Lock()
+
+ for !e.toSend.Empty() {
+ front := e.toSend.Dequeue()
+ e.sendQueueLen--
+
+ e.sendLk.Unlock()
+
+ if err := e.out(ctx, front); err != nil {
+ log.Warnw("error sending subscription response, killing subscription", "sub", e.id, "error", err)
+ e.stop()
+ return
+ }
+
+ e.sendLk.Lock()
+ }
+
+ e.sendLk.Unlock()
+ }
+ }
+}
+
+func (e *ethSubscription) send(ctx context.Context, v interface{}) {
+ resp := ethtypes.EthSubscriptionResponse{
+ SubscriptionID: e.id,
+ Result: v,
+ }
+
+ outParam, err := json.Marshal(resp)
+ if err != nil {
+ log.Warnw("marshaling subscription response", "sub", e.id, "error", err)
+ return
+ }
+
+ e.sendLk.Lock()
+ defer e.sendLk.Unlock()
+
+ e.toSend.Enqueue(outParam)
+
+ e.sendQueueLen++
+ if e.sendQueueLen > maxSendQueue {
+ log.Warnw("subscription send queue full, killing subscription", "sub", e.id)
+ e.stop()
+ return
+ }
+
+ select {
+ case e.sendCond <- struct{}{}:
+ default: // already signalled, and we're holding the lock so we know that the event will be processed
+ }
+}
+
+func (e *ethSubscription) start(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case v := <-e.in:
+ switch vt := v.(type) {
+ case *filter.CollectedEvent:
+ evs, err := ethFilterResultFromEvents([]*filter.CollectedEvent{vt}, e.StateAPI)
+ if err != nil {
+ continue
+ }
+
+ for _, r := range evs.Results {
+ e.send(ctx, r)
+ }
+ case *types.TipSet:
+ ev, err := newEthBlockFromFilecoinTipSet(ctx, vt, true, e.Chain, e.StateAPI)
+ if err != nil {
+ break
+ }
+
+ e.send(ctx, ev)
+ case *types.SignedMessage: // mpool txid
+ evs, err := ethFilterResultFromMessages([]*types.SignedMessage{vt}, e.StateAPI)
+ if err != nil {
+ continue
+ }
+
+ for _, r := range evs.Results {
+ e.send(ctx, r)
+ }
+ default:
+ log.Warnf("unexpected subscription value type: %T", vt)
+ }
+ }
+ }
+}
+
+func (e *ethSubscription) stop() {
+ e.mu.Lock()
+ if e.quit == nil {
+ e.mu.Unlock()
+ return
+ }
+
+ if e.quit != nil {
+ e.quit()
+ e.quit = nil
+ e.mu.Unlock()
+
+ for _, f := range e.filters {
+ // note: the context is actually unused in uninstallFilter
+ if err := e.uninstallFilter(context.TODO(), f); err != nil {
+ // this will leave the filter a zombie, collecting events up to the maximum allowed
+ log.Warnf("failed to remove filter when unsubscribing: %v", err)
+ }
+ }
+ }
+}
diff --git a/node/impl/full/eth_trace.go b/node/impl/full/eth_trace.go
new file mode 100644
index 000000000..3766c5448
--- /dev/null
+++ b/node/impl/full/eth_trace.go
@@ -0,0 +1,353 @@
+package full
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "github.com/multiformats/go-multicodec"
+ cbg "github.com/whyrusleeping/cbor-gen"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/builtin"
+ "github.com/filecoin-project/go-state-types/builtin/v10/evm"
+ "github.com/filecoin-project/go-state-types/exitcode"
+
+ builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/types/ethtypes"
+)
+
+// decodePayload is a utility function which decodes the payload using the given codec
+func decodePayload(payload []byte, codec uint64) (ethtypes.EthBytes, error) {
+ if len(payload) == 0 {
+ return nil, nil
+ }
+
+ switch multicodec.Code(codec) {
+ case multicodec.Identity:
+ return nil, nil
+ case multicodec.DagCbor, multicodec.Cbor:
+ buf, err := cbg.ReadByteArray(bytes.NewReader(payload), uint64(len(payload)))
+ if err != nil {
+ return nil, xerrors.Errorf("decodePayload: failed to decode cbor payload: %w", err)
+ }
+ return buf, nil
+ case multicodec.Raw:
+ return ethtypes.EthBytes(payload), nil
+ }
+
+ return nil, xerrors.Errorf("decodePayload: unsupported codec: %d", codec)
+}
+
+// buildTraces recursively builds the traces for a given ExecutionTrace by walking the subcalls
+func buildTraces(ctx context.Context, traces *[]*ethtypes.EthTrace, parent *ethtypes.EthTrace, addr []int, et types.ExecutionTrace, height int64, sa StateAPI) error {
+ // lookup the eth address from the from/to addresses. Note that this may fail but to support
+ // this we need to include the ActorID in the trace. For now, just log a warning and skip
+ // this trace.
+ //
+ // TODO: Add ActorID in trace, see https://github.com/filecoin-project/lotus/pull/11100#discussion_r1302442288
+ from, err := lookupEthAddress(ctx, et.Msg.From, sa)
+ if err != nil {
+ log.Warnf("buildTraces: failed to lookup from address %s: %v", et.Msg.From, err)
+ return nil
+ }
+ to, err := lookupEthAddress(ctx, et.Msg.To, sa)
+ if err != nil {
+ log.Warnf("buildTraces: failed to lookup to address %s: %v", et.Msg.To, err)
+ return nil
+ }
+
+ trace := &ethtypes.EthTrace{
+ Action: ethtypes.EthTraceAction{
+ From: from,
+ To: to,
+ Gas: ethtypes.EthUint64(et.Msg.GasLimit),
+ Input: nil,
+ Value: ethtypes.EthBigInt(et.Msg.Value),
+
+ FilecoinFrom: et.Msg.From,
+ FilecoinTo: et.Msg.To,
+ FilecoinMethod: et.Msg.Method,
+ FilecoinCodeCid: et.Msg.CodeCid,
+ },
+ Result: ethtypes.EthTraceResult{
+ GasUsed: ethtypes.EthUint64(et.SumGas().TotalGas),
+ Output: nil,
+ },
+ Subtraces: 0, // will be updated by the children once they are added to the trace
+ TraceAddress: addr,
+
+ Parent: parent,
+ LastByteCode: nil,
+ }
+
+ trace.SetCallType("call")
+
+ if et.Msg.Method == builtin.MethodsEVM.InvokeContract {
+ log.Debugf("COND1 found InvokeContract call at height: %d", height)
+
+ // TODO: ignore return errors since actors can send gibberish and we don't want
+ // to fail the whole trace in that case
+ trace.Action.Input, err = decodePayload(et.Msg.Params, et.Msg.ParamsCodec)
+ if err != nil {
+ return xerrors.Errorf("buildTraces: %w", err)
+ }
+ trace.Result.Output, err = decodePayload(et.MsgRct.Return, et.MsgRct.ReturnCodec)
+ if err != nil {
+ return xerrors.Errorf("buildTraces: %w", err)
+ }
+ } else if et.Msg.To == builtin.EthereumAddressManagerActorAddr &&
+ et.Msg.Method == builtin.MethodsEAM.CreateExternal {
+ log.Debugf("COND2 found CreateExternal call at height: %d", height)
+ trace.Action.Input, err = decodePayload(et.Msg.Params, et.Msg.ParamsCodec)
+ if err != nil {
+ return xerrors.Errorf("buildTraces: %w", err)
+ }
+
+ if et.MsgRct.ExitCode.IsSuccess() {
+ // ignore return value
+ trace.Result.Output = nil
+ } else {
+ // return value is the error message
+ trace.Result.Output, err = decodePayload(et.MsgRct.Return, et.MsgRct.ReturnCodec)
+ if err != nil {
+ return xerrors.Errorf("buildTraces: %w", err)
+ }
+ }
+
+ // treat this as a contract creation
+ trace.SetCallType("create")
+ } else {
+ // we are going to assume a native method, but we may change it in one of the edge cases below
+ // TODO: only do this if we know it's a native method (optimization)
+ trace.Action.Input, err = handleFilecoinMethodInput(et.Msg.Method, et.Msg.ParamsCodec, et.Msg.Params)
+ if err != nil {
+ return xerrors.Errorf("buildTraces: %w", err)
+ }
+ trace.Result.Output, err = handleFilecoinMethodOutput(et.MsgRct.ExitCode, et.MsgRct.ReturnCodec, et.MsgRct.Return)
+ if err != nil {
+ return xerrors.Errorf("buildTraces: %w", err)
+ }
+ }
+
+ // TODO: is it OK to check this here or is this only specific to certain edge case (evm to evm)?
+ if et.Msg.ReadOnly {
+ trace.SetCallType("staticcall")
+ }
+
+ // there are several edge cases that require special handling when displaying the traces. Note that while iterating over
+ // the traces we update the trace backwards (through the parent pointer)
+ if parent != nil {
+ // Handle Native actor creation
+ //
+ // Actor A calls to the init actor on method 2 and The init actor creates the target actor B then calls it on method 1
+ if parent.Action.FilecoinTo == builtin.InitActorAddr &&
+ parent.Action.FilecoinMethod == builtin.MethodsInit.Exec &&
+ et.Msg.Method == builtin.MethodConstructor {
+ log.Debugf("COND3 Native actor creation! method:%d, code:%s, height:%d", et.Msg.Method, et.Msg.CodeCid.String(), height)
+ parent.SetCallType("create")
+ parent.Action.To = to
+ parent.Action.Input = []byte{0xFE}
+ parent.Result.Output = nil
+
+ // there should never be any subcalls when creating a native actor
+ //
+ // TODO: add support for native actors calling another when created
+ return nil
+ }
+
+ // Handle EVM contract creation
+ //
+ // To detect EVM contract creation we need to check for the following sequence of events:
+ //
+ // 1) EVM contract A calls the EAM (Ethereum Address Manager) on method 2 (create) or 3 (create2).
+ // 2) The EAM calls the init actor on method 3 (Exec4).
+ // 3) The init actor creates the target actor B then calls it on method 1.
+ if parent.Parent != nil {
+ calledCreateOnEAM := parent.Parent.Action.FilecoinTo == builtin.EthereumAddressManagerActorAddr &&
+ (parent.Parent.Action.FilecoinMethod == builtin.MethodsEAM.Create || parent.Parent.Action.FilecoinMethod == builtin.MethodsEAM.Create2)
+ eamCalledInitOnExec4 := parent.Action.FilecoinTo == builtin.InitActorAddr &&
+ parent.Action.FilecoinMethod == builtin.MethodsInit.Exec4
+ initCreatedActor := trace.Action.FilecoinMethod == builtin.MethodConstructor
+
+ // TODO: We need to handle failures in contract creations and support resurrections on an existing but dead EVM actor)
+ if calledCreateOnEAM && eamCalledInitOnExec4 && initCreatedActor {
+ log.Debugf("COND4 EVM contract creation method:%d, code:%s, height:%d", et.Msg.Method, et.Msg.CodeCid.String(), height)
+
+ if parent.Parent.Action.FilecoinMethod == builtin.MethodsEAM.Create {
+ parent.Parent.SetCallType("create")
+ } else {
+ parent.Parent.SetCallType("create2")
+ }
+
+ // update the parent.parent to make this look like a single contract creation
+ parent.Parent.Action.To = trace.Action.To
+ parent.Parent.Subtraces = 0
+
+ // delete the parent (the EAM) and skip the current trace (init)
+ *traces = (*traces)[:len(*traces)-1]
+
+ return nil
+ }
+ }
+
+ if builtinactors.IsEvmActor(parent.Action.FilecoinCodeCid) {
+ // Handle delegate calls
+ //
+ // 1) Look for trace from an EVM actor to itself on InvokeContractDelegate, method 6.
+ // 2) Check that the previous trace calls another actor on method 3 (GetByteCode) and they are at the same level (same parent)
+ // 3) Treat this as a delegate call to actor A.
+ if parent.LastByteCode != nil && trace.Action.From == trace.Action.To &&
+ trace.Action.FilecoinMethod == builtin.MethodsEVM.InvokeContractDelegate {
+ log.Debugf("COND7 found delegate call, height: %d", height)
+ prev := parent.LastByteCode
+ if prev.Action.From == trace.Action.From && prev.Action.FilecoinMethod == builtin.MethodsEVM.GetBytecode && prev.Parent == trace.Parent {
+ trace.SetCallType("delegatecall")
+ trace.Action.To = prev.Action.To
+
+ var dp evm.DelegateCallParams
+ err := dp.UnmarshalCBOR(bytes.NewReader(et.Msg.Params))
+ if err != nil {
+ return xerrors.Errorf("failed UnmarshalCBOR: %w", err)
+ }
+ trace.Action.Input = dp.Input
+
+ trace.Result.Output, err = decodePayload(et.MsgRct.Return, et.MsgRct.ReturnCodec)
+ if err != nil {
+ return xerrors.Errorf("failed decodePayload: %w", err)
+ }
+ }
+ } else {
+ // Handle EVM call special casing
+ //
+ // Any outbound call from an EVM actor on methods 1-1023 are side-effects from EVM instructions
+ // and should be dropped from the trace.
+ if et.Msg.Method > 0 &&
+ et.Msg.Method <= 1023 {
+ log.Debugf("Infof found outbound call from an EVM actor on method 1-1023 method:%d, code:%s, height:%d", et.Msg.Method, parent.Action.FilecoinCodeCid.String(), height)
+
+ if et.Msg.Method == builtin.MethodsEVM.GetBytecode {
+ // save the last bytecode trace to handle delegate calls
+ parent.LastByteCode = trace
+ }
+
+ return nil
+ }
+ }
+ }
+
+ }
+
+ // we are adding trace to the traces so update the parent subtraces count as it was originally set to zero
+ if parent != nil {
+ parent.Subtraces++
+ }
+
+ *traces = append(*traces, trace)
+
+ for i, call := range et.Subcalls {
+ err := buildTraces(ctx, traces, trace, append(addr[:len(addr):len(addr)], i), call, height, sa)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func writePadded(w io.Writer, data any, size int) error {
+ tmp := &bytes.Buffer{}
+
+ // first write data to tmp buffer to get the size
+ err := binary.Write(tmp, binary.BigEndian, data)
+ if err != nil {
+ return fmt.Errorf("writePadded: failed writing tmp data to buffer: %w", err)
+ }
+
+ if tmp.Len() > size {
+ return fmt.Errorf("writePadded: data is larger than size")
+ }
+
+ // write tailing zeros to pad up to size
+ cnt := size - tmp.Len()
+ for i := 0; i < cnt; i++ {
+ err = binary.Write(w, binary.BigEndian, uint8(0))
+ if err != nil {
+ return fmt.Errorf("writePadded: failed writing tailing zeros to buffer: %w", err)
+ }
+ }
+
+ // finally write the actual value
+ err = binary.Write(w, binary.BigEndian, tmp.Bytes())
+ if err != nil {
+ return fmt.Errorf("writePadded: failed writing data to buffer: %w", err)
+ }
+
+ return nil
+}
+
+func handleFilecoinMethodInput(method abi.MethodNum, codec uint64, params []byte) ([]byte, error) {
+ NATIVE_METHOD_SELECTOR := []byte{0x86, 0x8e, 0x10, 0xc4}
+ EVM_WORD_SIZE := 32
+
+ staticArgs := []uint64{
+ uint64(method),
+ codec,
+ uint64(EVM_WORD_SIZE) * 3,
+ uint64(len(params)),
+ }
+ totalWords := len(staticArgs) + (len(params) / EVM_WORD_SIZE)
+ if len(params)%EVM_WORD_SIZE != 0 {
+ totalWords++
+ }
+ len := 4 + totalWords*EVM_WORD_SIZE
+
+ w := &bytes.Buffer{}
+ err := binary.Write(w, binary.BigEndian, NATIVE_METHOD_SELECTOR)
+ if err != nil {
+ return nil, fmt.Errorf("handleFilecoinMethodInput: failed writing method selector: %w", err)
+ }
+
+ for _, arg := range staticArgs {
+ err := writePadded(w, arg, 32)
+ if err != nil {
+ return nil, fmt.Errorf("handleFilecoinMethodInput: %w", err)
+ }
+ }
+ err = binary.Write(w, binary.BigEndian, params)
+ if err != nil {
+ return nil, fmt.Errorf("handleFilecoinMethodInput: failed writing params: %w", err)
+ }
+ remain := len - w.Len()
+ for i := 0; i < remain; i++ {
+ err = binary.Write(w, binary.BigEndian, uint8(0))
+ if err != nil {
+ return nil, fmt.Errorf("handleFilecoinMethodInput: failed writing tailing zeros: %w", err)
+ }
+ }
+
+ return w.Bytes(), nil
+}
+
+func handleFilecoinMethodOutput(exitCode exitcode.ExitCode, codec uint64, data []byte) ([]byte, error) {
+ w := &bytes.Buffer{}
+
+ values := []interface{}{uint32(exitCode), codec, uint32(w.Len()), uint32(len(data))}
+ for _, v := range values {
+ err := writePadded(w, v, 32)
+ if err != nil {
+ return nil, fmt.Errorf("handleFilecoinMethodOutput: %w", err)
+ }
+ }
+
+ err := binary.Write(w, binary.BigEndian, data)
+ if err != nil {
+ return nil, fmt.Errorf("handleFilecoinMethodOutput: failed writing data: %w", err)
+ }
+
+ return w.Bytes(), nil
+}
diff --git a/node/impl/full/eth_utils.go b/node/impl/full/eth_utils.go
new file mode 100644
index 000000000..5908c9412
--- /dev/null
+++ b/node/impl/full/eth_utils.go
@@ -0,0 +1,689 @@
+package full
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/ipfs/go-cid"
+ "golang.org/x/xerrors"
+
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/big"
+ builtintypes "github.com/filecoin-project/go-state-types/builtin"
+ "github.com/filecoin-project/go-state-types/builtin/v10/eam"
+ "github.com/filecoin-project/go-state-types/crypto"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/actors"
+ "github.com/filecoin-project/lotus/chain/store"
+ "github.com/filecoin-project/lotus/chain/types"
+ "github.com/filecoin-project/lotus/chain/types/ethtypes"
+ "github.com/filecoin-project/lotus/chain/vm"
+)
+
+func getTipsetByBlockNumber(ctx context.Context, chain *store.ChainStore, blkParam string, strict bool) (*types.TipSet, error) {
+ if blkParam == "earliest" {
+ return nil, fmt.Errorf("block param \"earliest\" is not supported")
+ }
+
+ head := chain.GetHeaviestTipSet()
+ switch blkParam {
+ case "pending":
+ return head, nil
+ case "latest":
+ parent, err := chain.GetTipSetFromKey(ctx, head.Parents())
+ if err != nil {
+ return nil, fmt.Errorf("cannot get parent tipset")
+ }
+ return parent, nil
+ default:
+ var num ethtypes.EthUint64
+ err := num.UnmarshalJSON([]byte(`"` + blkParam + `"`))
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse block number: %v", err)
+ }
+ if abi.ChainEpoch(num) > head.Height()-1 {
+ return nil, fmt.Errorf("requested a future epoch (beyond 'latest')")
+ }
+ ts, err := chain.GetTipsetByHeight(ctx, abi.ChainEpoch(num), head, true)
+ if err != nil {
+ return nil, fmt.Errorf("cannot get tipset at height: %v", num)
+ }
+ if strict && ts.Height() != abi.ChainEpoch(num) {
+ return nil, ErrNullRound
+ }
+ return ts, nil
+ }
+}
+
+func getTipsetByEthBlockNumberOrHash(ctx context.Context, chain *store.ChainStore, blkParam ethtypes.EthBlockNumberOrHash) (*types.TipSet, error) {
+ head := chain.GetHeaviestTipSet()
+
+ predefined := blkParam.PredefinedBlock
+ if predefined != nil {
+ if *predefined == "earliest" {
+ return nil, fmt.Errorf("block param \"earliest\" is not supported")
+ } else if *predefined == "pending" {
+ return head, nil
+ } else if *predefined == "latest" {
+ parent, err := chain.GetTipSetFromKey(ctx, head.Parents())
+ if err != nil {
+ return nil, fmt.Errorf("cannot get parent tipset")
+ }
+ return parent, nil
+ } else {
+ return nil, fmt.Errorf("unknown predefined block %s", *predefined)
+ }
+ }
+
+ if blkParam.BlockNumber != nil {
+ height := abi.ChainEpoch(*blkParam.BlockNumber)
+ if height > head.Height()-1 {
+ return nil, fmt.Errorf("requested a future epoch (beyond 'latest')")
+ }
+ ts, err := chain.GetTipsetByHeight(ctx, height, head, true)
+ if err != nil {
+ return nil, fmt.Errorf("cannot get tipset at height: %v", height)
+ }
+ return ts, nil
+ }
+
+ if blkParam.BlockHash != nil {
+ ts, err := chain.GetTipSetByCid(ctx, blkParam.BlockHash.ToCid())
+ if err != nil {
+ return nil, fmt.Errorf("cannot get tipset by hash: %v", err)
+ }
+
+ // verify that the tipset is in the canonical chain
+ if blkParam.RequireCanonical {
+ // walk up the current chain (our head) until we reach ts.Height()
+ walkTs, err := chain.GetTipsetByHeight(ctx, ts.Height(), head, true)
+ if err != nil {
+ return nil, fmt.Errorf("cannot get tipset at height: %v", ts.Height())
+ }
+
+ // verify that it equals the expected tipset
+ if !walkTs.Equals(ts) {
+ return nil, fmt.Errorf("tipset is not canonical")
+ }
+ }
+
+ return ts, nil
+ }
+
+ return nil, errors.New("invalid block param")
+}
+
+func ethCallToFilecoinMessage(ctx context.Context, tx ethtypes.EthCall) (*types.Message, error) {
+ var from address.Address
+ if tx.From == nil || *tx.From == (ethtypes.EthAddress{}) {
+ // Send from the filecoin "system" address.
+ var err error
+ from, err = (ethtypes.EthAddress{}).ToFilecoinAddress()
+ if err != nil {
+ return nil, fmt.Errorf("failed to construct the ethereum system address: %w", err)
+ }
+ } else {
+ // The from address must be translatable to an f4 address.
+ var err error
+ from, err = tx.From.ToFilecoinAddress()
+ if err != nil {
+ return nil, fmt.Errorf("failed to translate sender address (%s): %w", tx.From.String(), err)
+ }
+ if p := from.Protocol(); p != address.Delegated {
+ return nil, fmt.Errorf("expected a class 4 address, got: %d: %w", p, err)
+ }
+ }
+
+ var params []byte
+ if len(tx.Data) > 0 {
+ initcode := abi.CborBytes(tx.Data)
+ params2, err := actors.SerializeParams(&initcode)
+ if err != nil {
+ return nil, fmt.Errorf("failed to serialize params: %w", err)
+ }
+ params = params2
+ }
+
+ var to address.Address
+ var method abi.MethodNum
+ if tx.To == nil {
+ // this is a contract creation
+ to = builtintypes.EthereumAddressManagerActorAddr
+ method = builtintypes.MethodsEAM.CreateExternal
+ } else {
+ addr, err := tx.To.ToFilecoinAddress()
+ if err != nil {
+ return nil, xerrors.Errorf("cannot get Filecoin address: %w", err)
+ }
+ to = addr
+ method = builtintypes.MethodsEVM.InvokeContract
+ }
+
+ return &types.Message{
+ From: from,
+ To: to,
+ Value: big.Int(tx.Value),
+ Method: method,
+ Params: params,
+ GasLimit: build.BlockGasLimit,
+ GasFeeCap: big.Zero(),
+ GasPremium: big.Zero(),
+ }, nil
+}
+
+func newEthBlockFromFilecoinTipSet(ctx context.Context, ts *types.TipSet, fullTxInfo bool, cs *store.ChainStore, sa StateAPI) (ethtypes.EthBlock, error) {
+ parentKeyCid, err := ts.Parents().Cid()
+ if err != nil {
+ return ethtypes.EthBlock{}, err
+ }
+ parentBlkHash, err := ethtypes.EthHashFromCid(parentKeyCid)
+ if err != nil {
+ return ethtypes.EthBlock{}, err
+ }
+
+ bn := ethtypes.EthUint64(ts.Height())
+
+ blkCid, err := ts.Key().Cid()
+ if err != nil {
+ return ethtypes.EthBlock{}, err
+ }
+ blkHash, err := ethtypes.EthHashFromCid(blkCid)
+ if err != nil {
+ return ethtypes.EthBlock{}, err
+ }
+
+ msgs, rcpts, err := messagesAndReceipts(ctx, ts, cs, sa)
+ if err != nil {
+ return ethtypes.EthBlock{}, xerrors.Errorf("failed to retrieve messages and receipts: %w", err)
+ }
+
+ block := ethtypes.NewEthBlock(len(msgs) > 0)
+
+ gasUsed := int64(0)
+ for i, msg := range msgs {
+ rcpt := rcpts[i]
+ ti := ethtypes.EthUint64(i)
+ gasUsed += rcpt.GasUsed
+ var smsg *types.SignedMessage
+ switch msg := msg.(type) {
+ case *types.SignedMessage:
+ smsg = msg
+ case *types.Message:
+ smsg = &types.SignedMessage{
+ Message: *msg,
+ Signature: crypto.Signature{
+ Type: crypto.SigTypeBLS,
+ },
+ }
+ default:
+ return ethtypes.EthBlock{}, xerrors.Errorf("failed to get signed msg %s: %w", msg.Cid(), err)
+ }
+ tx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
+ if err != nil {
+ return ethtypes.EthBlock{}, xerrors.Errorf("failed to convert msg to ethTx: %w", err)
+ }
+
+ tx.ChainID = ethtypes.EthUint64(build.Eip155ChainId)
+ tx.BlockHash = &blkHash
+ tx.BlockNumber = &bn
+ tx.TransactionIndex = &ti
+
+ if fullTxInfo {
+ block.Transactions = append(block.Transactions, tx)
+ } else {
+ block.Transactions = append(block.Transactions, tx.Hash.String())
+ }
+ }
+
+ block.Hash = blkHash
+ block.Number = bn
+ block.ParentHash = parentBlkHash
+ block.Timestamp = ethtypes.EthUint64(ts.Blocks()[0].Timestamp)
+ block.BaseFeePerGas = ethtypes.EthBigInt{Int: ts.Blocks()[0].ParentBaseFee.Int}
+ block.GasUsed = ethtypes.EthUint64(gasUsed)
+ return block, nil
+}
+
+func messagesAndReceipts(ctx context.Context, ts *types.TipSet, cs *store.ChainStore, sa StateAPI) ([]types.ChainMsg, []types.MessageReceipt, error) {
+ msgs, err := cs.MessagesForTipset(ctx, ts)
+ if err != nil {
+ return nil, nil, xerrors.Errorf("error loading messages for tipset: %v: %w", ts, err)
+ }
+
+ _, rcptRoot, err := sa.StateManager.TipSetState(ctx, ts)
+ if err != nil {
+ return nil, nil, xerrors.Errorf("failed to compute state: %w", err)
+ }
+
+ rcpts, err := cs.ReadReceipts(ctx, rcptRoot)
+ if err != nil {
+ return nil, nil, xerrors.Errorf("error loading receipts for tipset: %v: %w", ts, err)
+ }
+
+ if len(msgs) != len(rcpts) {
+ return nil, nil, xerrors.Errorf("receipts and message array lengths didn't match for tipset: %v: %w", ts, err)
+ }
+
+ return msgs, rcpts, nil
+}
+
+const errorFunctionSelector = "\x08\xc3\x79\xa0" // Error(string)
+const panicFunctionSelector = "\x4e\x48\x7b\x71" // Panic(uint256)
+// Eth ABI (solidity) panic codes.
+var panicErrorCodes map[uint64]string = map[uint64]string{
+ 0x00: "Panic()",
+ 0x01: "Assert()",
+ 0x11: "ArithmeticOverflow()",
+ 0x12: "DivideByZero()",
+ 0x21: "InvalidEnumVariant()",
+ 0x22: "InvalidStorageArray()",
+ 0x31: "PopEmptyArray()",
+ 0x32: "ArrayIndexOutOfBounds()",
+ 0x41: "OutOfMemory()",
+ 0x51: "CalledUninitializedFunction()",
+}
+
+// Parse an ABI encoded revert reason. This reason should be encoded as if it were the parameters to
+// an `Error(string)` function call.
+//
+// See https://docs.soliditylang.org/en/latest/control-structures.html#panic-via-assert-and-error-via-require
+func parseEthRevert(ret []byte) string {
+ if len(ret) == 0 {
+ return "none"
+ }
+ var cbytes abi.CborBytes
+ if err := cbytes.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
+ return "ERROR: revert reason is not cbor encoded bytes"
+ }
+ if len(cbytes) == 0 {
+ return "none"
+ }
+ // If it's not long enough to contain an ABI encoded response, return immediately.
+ if len(cbytes) < 4+32 {
+ return ethtypes.EthBytes(cbytes).String()
+ }
+ switch string(cbytes[:4]) {
+ case panicFunctionSelector:
+ cbytes := cbytes[4 : 4+32]
+ // Read and check the code.
+ code, err := ethtypes.EthUint64FromBytes(cbytes)
+ if err != nil {
+ // If it's too big, just return the raw value.
+ codeInt := big.PositiveFromUnsignedBytes(cbytes)
+ return fmt.Sprintf("Panic(%s)", ethtypes.EthBigInt(codeInt).String())
+ }
+ if s, ok := panicErrorCodes[uint64(code)]; ok {
+ return s
+ }
+ return fmt.Sprintf("Panic(0x%x)", code)
+ case errorFunctionSelector:
+ cbytes := cbytes[4:]
+ cbytesLen := ethtypes.EthUint64(len(cbytes))
+ // Read and check the offset.
+ offset, err := ethtypes.EthUint64FromBytes(cbytes[:32])
+ if err != nil {
+ break
+ }
+ if cbytesLen < offset {
+ break
+ }
+
+ // Read and check the length.
+ if cbytesLen-offset < 32 {
+ break
+ }
+ start := offset + 32
+ length, err := ethtypes.EthUint64FromBytes(cbytes[offset : offset+32])
+ if err != nil {
+ break
+ }
+ if cbytesLen-start < length {
+ break
+ }
+ // Slice the error message.
+ return fmt.Sprintf("Error(%s)", cbytes[start:start+length])
+ }
+ return ethtypes.EthBytes(cbytes).String()
+}
+
+// lookupEthAddress makes its best effort at finding the Ethereum address for a
+// Filecoin address. It does the following:
+//
+// 1. If the supplied address is an f410 address, we return its payload as the EthAddress.
+// 2. Otherwise (f0, f1, f2, f3), we look up the actor on the state tree. If it has a delegated address, we return it if it's f410 address.
+// 3. Otherwise, we fall back to returning a masked ID Ethereum address. If the supplied address is an f0 address, we
+// use that ID to form the masked ID address.
+// 4. Otherwise, we fetch the actor's ID from the state tree and form the masked ID with it.
+func lookupEthAddress(ctx context.Context, addr address.Address, sa StateAPI) (ethtypes.EthAddress, error) {
+ // BLOCK A: We are trying to get an actual Ethereum address from an f410 address.
+ // Attempt to convert directly, if it's an f4 address.
+ ethAddr, err := ethtypes.EthAddressFromFilecoinAddress(addr)
+ if err == nil && !ethAddr.IsMaskedID() {
+ return ethAddr, nil
+ }
+
+ // Lookup on the target actor and try to get an f410 address.
+ if actor, err := sa.StateGetActor(ctx, addr, types.EmptyTSK); err != nil {
+ return ethtypes.EthAddress{}, err
+ } else if actor.Address != nil {
+ if ethAddr, err := ethtypes.EthAddressFromFilecoinAddress(*actor.Address); err == nil && !ethAddr.IsMaskedID() {
+ return ethAddr, nil
+ }
+ }
+
+ // BLOCK B: We gave up on getting an actual Ethereum address and are falling back to a Masked ID address.
+ // Check if we already have an ID addr, and use it if possible.
+ if err == nil && ethAddr.IsMaskedID() {
+ return ethAddr, nil
+ }
+
+ // Otherwise, resolve the ID addr.
+ idAddr, err := sa.StateLookupID(ctx, addr, types.EmptyTSK)
+ if err != nil {
+ return ethtypes.EthAddress{}, err
+ }
+ return ethtypes.EthAddressFromFilecoinAddress(idAddr)
+}
+
+func parseEthTopics(topics ethtypes.EthTopicSpec) (map[string][][]byte, error) {
+ keys := map[string][][]byte{}
+ for idx, vals := range topics {
+ if len(vals) == 0 {
+ continue
+ }
+ // Ethereum topics are emitted using `LOG{0..4}` opcodes resulting in topics1..4
+ key := fmt.Sprintf("t%d", idx+1)
+ for _, v := range vals {
+ v := v // copy the ethhash to avoid repeatedly referencing the same one.
+ keys[key] = append(keys[key], v[:])
+ }
+ }
+ return keys, nil
+}
+
+func ethTxHashFromMessageCid(ctx context.Context, c cid.Cid, sa StateAPI) (ethtypes.EthHash, error) {
+ smsg, err := sa.Chain.GetSignedMessage(ctx, c)
+ if err == nil {
+ // This is an Eth Tx, Secp message, Or BLS message in the mpool
+ return ethTxHashFromSignedMessage(ctx, smsg, sa)
+ }
+
+ _, err = sa.Chain.GetMessage(ctx, c)
+ if err == nil {
+ // This is a BLS message
+ return ethtypes.EthHashFromCid(c)
+ }
+
+ return ethtypes.EmptyEthHash, nil
+}
+
+func ethTxHashFromSignedMessage(ctx context.Context, smsg *types.SignedMessage, sa StateAPI) (ethtypes.EthHash, error) {
+ if smsg.Signature.Type == crypto.SigTypeDelegated {
+ ethTx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
+ if err != nil {
+ return ethtypes.EmptyEthHash, err
+ }
+ return ethTx.Hash, nil
+ } else if smsg.Signature.Type == crypto.SigTypeSecp256k1 {
+ return ethtypes.EthHashFromCid(smsg.Cid())
+ } else { // BLS message
+ return ethtypes.EthHashFromCid(smsg.Message.Cid())
+ }
+}
+
+func newEthTxFromSignedMessage(ctx context.Context, smsg *types.SignedMessage, sa StateAPI) (ethtypes.EthTx, error) {
+ var tx ethtypes.EthTx
+ var err error
+
+ // This is an eth tx
+ if smsg.Signature.Type == crypto.SigTypeDelegated {
+ tx, err = ethtypes.EthTxFromSignedEthMessage(smsg)
+ if err != nil {
+ return ethtypes.EthTx{}, xerrors.Errorf("failed to convert from signed message: %w", err)
+ }
+
+ tx.Hash, err = tx.TxHash()
+ if err != nil {
+ return ethtypes.EthTx{}, xerrors.Errorf("failed to calculate hash for ethTx: %w", err)
+ }
+
+ fromAddr, err := lookupEthAddress(ctx, smsg.Message.From, sa)
+ if err != nil {
+ return ethtypes.EthTx{}, xerrors.Errorf("failed to resolve Ethereum address: %w", err)
+ }
+
+ tx.From = fromAddr
+ } else if smsg.Signature.Type == crypto.SigTypeSecp256k1 { // Secp Filecoin Message
+ tx = ethTxFromNativeMessage(ctx, smsg.VMMessage(), sa)
+ tx.Hash, err = ethtypes.EthHashFromCid(smsg.Cid())
+ if err != nil {
+ return tx, err
+ }
+ } else { // BLS Filecoin message
+ tx = ethTxFromNativeMessage(ctx, smsg.VMMessage(), sa)
+ tx.Hash, err = ethtypes.EthHashFromCid(smsg.Message.Cid())
+ if err != nil {
+ return tx, err
+ }
+ }
+
+ return tx, nil
+}
+
+// ethTxFromNativeMessage does NOT populate:
+// - BlockHash
+// - BlockNumber
+// - TransactionIndex
+// - Hash
+func ethTxFromNativeMessage(ctx context.Context, msg *types.Message, sa StateAPI) ethtypes.EthTx {
+ // We don't care if we error here, conversion is best effort for non-eth transactions
+ from, _ := lookupEthAddress(ctx, msg.From, sa)
+ to, _ := lookupEthAddress(ctx, msg.To, sa)
+ return ethtypes.EthTx{
+ To: &to,
+ From: from,
+ Nonce: ethtypes.EthUint64(msg.Nonce),
+ ChainID: ethtypes.EthUint64(build.Eip155ChainId),
+ Value: ethtypes.EthBigInt(msg.Value),
+ Type: ethtypes.Eip1559TxType,
+ Gas: ethtypes.EthUint64(msg.GasLimit),
+ MaxFeePerGas: ethtypes.EthBigInt(msg.GasFeeCap),
+ MaxPriorityFeePerGas: ethtypes.EthBigInt(msg.GasPremium),
+ AccessList: []ethtypes.EthHash{},
+ }
+}
+
+func getSignedMessage(ctx context.Context, cs *store.ChainStore, msgCid cid.Cid) (*types.SignedMessage, error) {
+ smsg, err := cs.GetSignedMessage(ctx, msgCid)
+ if err != nil {
+ // We couldn't find the signed message, it might be a BLS message, so search for a regular message.
+ msg, err := cs.GetMessage(ctx, msgCid)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to find msg %s: %w", msgCid, err)
+ }
+ smsg = &types.SignedMessage{
+ Message: *msg,
+ Signature: crypto.Signature{
+ Type: crypto.SigTypeBLS,
+ },
+ }
+ }
+
+ return smsg, nil
+}
+
+// newEthTxFromMessageLookup creates an ethereum transaction from filecoin message lookup. If a negative txIdx is passed
+// into the function, it looks up the transaction index of the message in the tipset, otherwise it uses the txIdx passed into the
+// function
+func newEthTxFromMessageLookup(ctx context.Context, msgLookup *api.MsgLookup, txIdx int, cs *store.ChainStore, sa StateAPI) (ethtypes.EthTx, error) {
+ ts, err := cs.LoadTipSet(ctx, msgLookup.TipSet)
+ if err != nil {
+ return ethtypes.EthTx{}, err
+ }
+
+ // This tx is located in the parent tipset
+ parentTs, err := cs.LoadTipSet(ctx, ts.Parents())
+ if err != nil {
+ return ethtypes.EthTx{}, err
+ }
+
+ parentTsCid, err := parentTs.Key().Cid()
+ if err != nil {
+ return ethtypes.EthTx{}, err
+ }
+
+ // lookup the transactionIndex
+ if txIdx < 0 {
+ msgs, err := cs.MessagesForTipset(ctx, parentTs)
+ if err != nil {
+ return ethtypes.EthTx{}, err
+ }
+ for i, msg := range msgs {
+ if msg.Cid() == msgLookup.Message {
+ txIdx = i
+ break
+ }
+ }
+ if txIdx < 0 {
+ return ethtypes.EthTx{}, fmt.Errorf("cannot find the msg in the tipset")
+ }
+ }
+
+ blkHash, err := ethtypes.EthHashFromCid(parentTsCid)
+ if err != nil {
+ return ethtypes.EthTx{}, err
+ }
+
+ smsg, err := getSignedMessage(ctx, cs, msgLookup.Message)
+ if err != nil {
+ return ethtypes.EthTx{}, xerrors.Errorf("failed to get signed msg: %w", err)
+ }
+
+ tx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
+ if err != nil {
+ return ethtypes.EthTx{}, err
+ }
+
+ var (
+ bn = ethtypes.EthUint64(parentTs.Height())
+ ti = ethtypes.EthUint64(txIdx)
+ )
+
+ tx.ChainID = ethtypes.EthUint64(build.Eip155ChainId)
+ tx.BlockHash = &blkHash
+ tx.BlockNumber = &bn
+ tx.TransactionIndex = &ti
+ return tx, nil
+}
+
+func newEthTxReceipt(ctx context.Context, tx ethtypes.EthTx, lookup *api.MsgLookup, events []types.Event, cs *store.ChainStore, sa StateAPI) (api.EthTxReceipt, error) {
+ var (
+ transactionIndex ethtypes.EthUint64
+ blockHash ethtypes.EthHash
+ blockNumber ethtypes.EthUint64
+ )
+
+ if tx.TransactionIndex != nil {
+ transactionIndex = *tx.TransactionIndex
+ }
+ if tx.BlockHash != nil {
+ blockHash = *tx.BlockHash
+ }
+ if tx.BlockNumber != nil {
+ blockNumber = *tx.BlockNumber
+ }
+
+ receipt := api.EthTxReceipt{
+ TransactionHash: tx.Hash,
+ From: tx.From,
+ To: tx.To,
+ TransactionIndex: transactionIndex,
+ BlockHash: blockHash,
+ BlockNumber: blockNumber,
+ Type: ethtypes.EthUint64(2),
+ Logs: []ethtypes.EthLog{}, // empty log array is compulsory when no logs, or libraries like ethers.js break
+ LogsBloom: ethtypes.EmptyEthBloom[:],
+ }
+
+ if lookup.Receipt.ExitCode.IsSuccess() {
+ receipt.Status = 1
+ } else {
+ receipt.Status = 0
+ }
+
+ receipt.GasUsed = ethtypes.EthUint64(lookup.Receipt.GasUsed)
+
+ // TODO: handle CumulativeGasUsed
+ receipt.CumulativeGasUsed = ethtypes.EmptyEthInt
+
+ // TODO: avoid loading the tipset twice (once here, once when we convert the message to a txn)
+ ts, err := cs.GetTipSetFromKey(ctx, lookup.TipSet)
+ if err != nil {
+ return api.EthTxReceipt{}, xerrors.Errorf("failed to lookup tipset %s when constructing the eth txn receipt: %w", lookup.TipSet, err)
+ }
+
+ baseFee := ts.Blocks()[0].ParentBaseFee
+ gasOutputs := vm.ComputeGasOutputs(lookup.Receipt.GasUsed, int64(tx.Gas), baseFee, big.Int(tx.MaxFeePerGas), big.Int(tx.MaxPriorityFeePerGas), true)
+ totalSpent := big.Sum(gasOutputs.BaseFeeBurn, gasOutputs.MinerTip, gasOutputs.OverEstimationBurn)
+
+ effectiveGasPrice := big.Zero()
+ if lookup.Receipt.GasUsed > 0 {
+ effectiveGasPrice = big.Div(totalSpent, big.NewInt(lookup.Receipt.GasUsed))
+ }
+ receipt.EffectiveGasPrice = ethtypes.EthBigInt(effectiveGasPrice)
+
+ if receipt.To == nil && lookup.Receipt.ExitCode.IsSuccess() {
+ // Create and Create2 return the same things.
+ var ret eam.CreateExternalReturn
+ if err := ret.UnmarshalCBOR(bytes.NewReader(lookup.Receipt.Return)); err != nil {
+ return api.EthTxReceipt{}, xerrors.Errorf("failed to parse contract creation result: %w", err)
+ }
+ addr := ethtypes.EthAddress(ret.EthAddress)
+ receipt.ContractAddress = &addr
+ }
+
+ if len(events) > 0 {
+ receipt.Logs = make([]ethtypes.EthLog, 0, len(events))
+ for i, evt := range events {
+ l := ethtypes.EthLog{
+ Removed: false,
+ LogIndex: ethtypes.EthUint64(i),
+ TransactionHash: tx.Hash,
+ TransactionIndex: transactionIndex,
+ BlockHash: blockHash,
+ BlockNumber: blockNumber,
+ }
+
+ data, topics, ok := ethLogFromEvent(evt.Entries)
+ if !ok {
+ // not an eth event.
+ continue
+ }
+ for _, topic := range topics {
+ ethtypes.EthBloomSet(receipt.LogsBloom, topic[:])
+ }
+ l.Data = data
+ l.Topics = topics
+
+ addr, err := address.NewIDAddress(uint64(evt.Emitter))
+ if err != nil {
+ return api.EthTxReceipt{}, xerrors.Errorf("failed to create ID address: %w", err)
+ }
+
+ l.Address, err = lookupEthAddress(ctx, addr, sa)
+ if err != nil {
+ return api.EthTxReceipt{}, xerrors.Errorf("failed to resolve Ethereum address: %w", err)
+ }
+
+ ethtypes.EthBloomSet(receipt.LogsBloom, l.Address[:])
+ receipt.Logs = append(receipt.Logs, l)
+ }
+ }
+
+ return receipt, nil
+}
diff --git a/node/impl/full/state.go b/node/impl/full/state.go
index 78f450626..514951675 100644
--- a/node/impl/full/state.go
+++ b/node/impl/full/state.go
@@ -1752,7 +1752,34 @@ func (a *StateAPI) StateGetRandomnessFromTickets(ctx context.Context, personaliz
func (a *StateAPI) StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) {
return a.StateManager.GetRandomnessFromBeacon(ctx, personalization, randEpoch, entropy, tsk)
+}
+func (a *StateAPI) StateGetRandomnessDigestFromTickets(ctx context.Context, randEpoch abi.ChainEpoch, tsk types.TipSetKey) (abi.Randomness, error) {
+ ts, err := a.Chain.GetTipSetFromKey(ctx, tsk)
+ if err != nil {
+ return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
+ }
+
+ ret, err := a.StateManager.GetRandomnessDigestFromTickets(ctx, randEpoch, ts.Key())
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get randomness digest from tickets: %w", err)
+ }
+
+ return ret[:], nil
+}
+
+func (a *StateAPI) StateGetRandomnessDigestFromBeacon(ctx context.Context, randEpoch abi.ChainEpoch, tsk types.TipSetKey) (abi.Randomness, error) {
+ ts, err := a.Chain.GetTipSetFromKey(ctx, tsk)
+ if err != nil {
+ return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err)
+ }
+
+ ret, err := a.StateManager.GetRandomnessDigestFromBeacon(ctx, randEpoch, ts.Key())
+ if err != nil {
+ return nil, xerrors.Errorf("failed to get randomness digest from beacon: %w", err)
+ }
+
+ return ret[:], nil
}
func (a *StateAPI) StateGetBeaconEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) {
@@ -1786,6 +1813,7 @@ func (a *StateAPI) StateGetNetworkParams(ctx context.Context) (*api.NetworkParam
ConsensusMinerMinPower: build.ConsensusMinerMinPower,
SupportedProofTypes: build.SupportedProofTypes,
PreCommitChallengeDelay: build.PreCommitChallengeDelay,
+ Eip155ChainID: build.Eip155ChainId,
ForkUpgradeParams: api.ForkUpgradeParams{
UpgradeSmokeHeight: build.UpgradeSmokeHeight,
UpgradeBreezeHeight: build.UpgradeBreezeHeight,
@@ -1811,6 +1839,7 @@ func (a *StateAPI) StateGetNetworkParams(ctx context.Context) (*api.NetworkParam
UpgradeHyggeHeight: build.UpgradeHyggeHeight,
UpgradeLightningHeight: build.UpgradeLightningHeight,
UpgradeThunderHeight: build.UpgradeThunderHeight,
+ UpgradeWatermelonHeight: build.UpgradeWatermelonHeight,
},
}, nil
}
diff --git a/node/impl/full/sync.go b/node/impl/full/sync.go
index 9be43338e..223f5c29e 100644
--- a/node/impl/full/sync.go
+++ b/node/impl/full/sync.go
@@ -58,9 +58,16 @@ func (a *SyncAPI) SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) erro
}
if a.SlashFilter != nil && os.Getenv("LOTUS_NO_SLASHFILTER") != "_yes_i_know_i_can_and_probably_will_lose_all_my_fil_and_power_" {
- if _, err = a.SlashFilter.MinedBlock(ctx, blk.Header, parent.Height); err != nil {
- log.Errorf(" SLASH FILTER ERROR: %s", err)
- return xerrors.Errorf(" SLASH FILTER ERROR: %w", err)
+ witness, fault, err := a.SlashFilter.MinedBlock(ctx, blk.Header, parent.Height)
+ if err != nil {
+ log.Errorf(" SLASH FILTER ERRORED: %s", err)
+ // Return an error here, because it's _probably_ wiser to not submit this block
+ return xerrors.Errorf(" SLASH FILTER ERRORED: %w", err)
+ }
+
+ if fault {
+ log.Errorf(" SLASH FILTER DETECTED FAULT due to witness %s", witness)
+ return xerrors.Errorf(" SLASH FILTER DETECTED FAULT due to witness %s", witness)
}
}
diff --git a/node/impl/full/txhashmanager.go b/node/impl/full/txhashmanager.go
new file mode 100644
index 000000000..6757cc6dd
--- /dev/null
+++ b/node/impl/full/txhashmanager.go
@@ -0,0 +1,129 @@
+package full
+
+import (
+ "context"
+ "time"
+
+ "github.com/filecoin-project/go-state-types/abi"
+ "github.com/filecoin-project/go-state-types/crypto"
+
+ "github.com/filecoin-project/lotus/api"
+ "github.com/filecoin-project/lotus/build"
+ "github.com/filecoin-project/lotus/chain/ethhashlookup"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+type EthTxHashManager struct {
+ StateAPI StateAPI
+ TransactionHashLookup *ethhashlookup.EthTxHashLookup
+}
+
+func (m *EthTxHashManager) Revert(ctx context.Context, from, to *types.TipSet) error {
+ return nil
+}
+
+func (m *EthTxHashManager) PopulateExistingMappings(ctx context.Context, minHeight abi.ChainEpoch) error {
+ if minHeight < build.UpgradeHyggeHeight {
+ minHeight = build.UpgradeHyggeHeight
+ }
+
+ ts := m.StateAPI.Chain.GetHeaviestTipSet()
+ for ts.Height() > minHeight {
+ for _, block := range ts.Blocks() {
+ msgs, err := m.StateAPI.Chain.SecpkMessagesForBlock(ctx, block)
+ if err != nil {
+ // If we can't find the messages, we've either imported from snapshot or pruned the store
+ log.Debug("exiting message mapping population at epoch ", ts.Height())
+ return nil
+ }
+
+ for _, msg := range msgs {
+ m.ProcessSignedMessage(ctx, msg)
+ }
+ }
+
+ var err error
+ ts, err = m.StateAPI.Chain.GetTipSetFromKey(ctx, ts.Parents())
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *EthTxHashManager) Apply(ctx context.Context, from, to *types.TipSet) error {
+ for _, blk := range to.Blocks() {
+ _, smsgs, err := m.StateAPI.Chain.MessagesForBlock(ctx, blk)
+ if err != nil {
+ return err
+ }
+
+ for _, smsg := range smsgs {
+ if smsg.Signature.Type != crypto.SigTypeDelegated {
+ continue
+ }
+
+ hash, err := ethTxHashFromSignedMessage(ctx, smsg, m.StateAPI)
+ if err != nil {
+ return err
+ }
+
+ err = m.TransactionHashLookup.UpsertHash(hash, smsg.Cid())
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (m *EthTxHashManager) ProcessSignedMessage(ctx context.Context, msg *types.SignedMessage) {
+ if msg.Signature.Type != crypto.SigTypeDelegated {
+ return
+ }
+
+ ethTx, err := newEthTxFromSignedMessage(ctx, msg, m.StateAPI)
+ if err != nil {
+ log.Errorf("error converting filecoin message to eth tx: %s", err)
+ return
+ }
+
+ err = m.TransactionHashLookup.UpsertHash(ethTx.Hash, msg.Cid())
+ if err != nil {
+ log.Errorf("error inserting tx mapping to db: %s", err)
+ return
+ }
+}
+
+func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, manager *EthTxHashManager) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case u := <-ch:
+ if u.Type != api.MpoolAdd {
+ continue
+ }
+
+ manager.ProcessSignedMessage(ctx, u.Message)
+ }
+ }
+}
+
+func EthTxHashGC(ctx context.Context, retentionDays int, manager *EthTxHashManager) {
+ if retentionDays == 0 {
+ return
+ }
+
+ gcPeriod := 1 * time.Hour
+ for {
+ entriesDeleted, err := manager.TransactionHashLookup.DeleteEntriesOlderThan(retentionDays)
+ if err != nil {
+ log.Errorf("error garbage collecting eth transaction hash database: %s", err)
+ }
+ log.Infof("garbage collection run on eth transaction hash lookup database. %d entries deleted", entriesDeleted)
+ time.Sleep(gcPeriod)
+ }
+}
diff --git a/node/impl/storminer.go b/node/impl/storminer.go
index 4932e0504..0ea746e91 100644
--- a/node/impl/storminer.go
+++ b/node/impl/storminer.go
@@ -281,7 +281,16 @@ func (sm *StorageMinerAPI) SectorUnseal(ctx context.Context, sectorNum abi.Secto
ProofType: status.SealProof,
}
- return sm.StorageMgr.SectorsUnsealPiece(ctx, sector, storiface.UnpaddedByteIndex(0), abi.UnpaddedPieceSize(0), status.Ticket.Value, status.CommD)
+ bgCtx := context.Background()
+
+ go func() {
+ err := sm.StorageMgr.SectorsUnsealPiece(bgCtx, sector, storiface.UnpaddedByteIndex(0), abi.UnpaddedPieceSize(0), status.Ticket.Value, status.CommD)
+ if err != nil {
+ log.Errorf("unseal for sector %d failed: %+v", sectorNum, err)
+ }
+ }()
+
+ return nil
}
// List all staged sectors
diff --git a/node/modules/actorevent.go b/node/modules/actorevent.go
index e871ea005..4ce04cefd 100644
--- a/node/modules/actorevent.go
+++ b/node/modules/actorevent.go
@@ -126,8 +126,6 @@ func EthEventAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo
MaxFilterResults: cfg.Events.MaxFilterResults,
}
- const ChainHeadConfidence = 1
-
lc.Append(fx.Hook{
OnStart: func(context.Context) error {
ev, err := events.NewEvents(ctx, &evapi)
diff --git a/node/modules/faultreport.go b/node/modules/faultreport.go
new file mode 100644
index 000000000..c42602d7e
--- /dev/null
+++ b/node/modules/faultreport.go
@@ -0,0 +1,27 @@
+package modules
+
+import (
+ "go.uber.org/fx"
+
+ "github.com/filecoin-project/lotus/chain/gen/slashfilter/slashsvc"
+ "github.com/filecoin-project/lotus/node/config"
+ "github.com/filecoin-project/lotus/node/impl/full"
+ "github.com/filecoin-project/lotus/node/modules/helpers"
+)
+
+type consensusReporterModules struct {
+ fx.In
+
+ full.WalletAPI
+ full.ChainAPI
+ full.MpoolAPI
+ full.SyncAPI
+}
+
+func RunConsensusFaultReporter(config config.FaultReporterConfig) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, mod consensusReporterModules) error {
+ return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, mod consensusReporterModules) error {
+ ctx := helpers.LifecycleCtx(mctx, lc)
+
+ return slashsvc.SlashConsensus(ctx, &mod, config.ConsensusFaultReporterDataDir, config.ConsensusFaultReporterAddress)
+ }
+}
diff --git a/node/modules/lp2p/pubsub.go b/node/modules/lp2p/pubsub.go
index 33a03f844..2b3efce6c 100644
--- a/node/modules/lp2p/pubsub.go
+++ b/node/modules/lp2p/pubsub.go
@@ -559,6 +559,7 @@ func (trw *tracerWrapper) Trace(evt *pubsub_pb.TraceEvent) {
}
case pubsub_pb.TraceEvent_PRUNE:
+ stats.Record(context.TODO(), metrics.PubsubPruneMessage.M(1))
if trw.traceMessage(evt.GetPrune().GetTopic()) {
if trw.lp2pTracer != nil {
trw.lp2pTracer.Trace(evt)
diff --git a/node/modules/lp2p/rcmgr.go b/node/modules/lp2p/rcmgr.go
index 0035ed05b..f2b284986 100644
--- a/node/modules/lp2p/rcmgr.go
+++ b/node/modules/lp2p/rcmgr.go
@@ -15,7 +15,6 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager"
- rcmgrObs "github.com/libp2p/go-libp2p/p2p/host/resource-manager/obs"
"github.com/prometheus/client_golang/prometheus"
"go.opencensus.io/stats"
"go.opencensus.io/tag"
@@ -113,13 +112,13 @@ func ResourceManager(connMgrHi uint) func(lc fx.Lifecycle, repo repo.LockedRepo)
return nil, err
}
- str, err := rcmgrObs.NewStatsTraceReporter()
+ str, err := rcmgr.NewStatsTraceReporter()
if err != nil {
return nil, fmt.Errorf("error creating resource manager stats reporter: %w", err)
}
rcmgrMetricsOnce.Do(func() {
- rcmgrObs.MustRegisterWith(prometheus.DefaultRegisterer)
+ rcmgr.MustRegisterWith(prometheus.DefaultRegisterer)
})
// Metrics
diff --git a/node/modules/services.go b/node/modules/services.go
index 9acebd071..bb1d41917 100644
--- a/node/modules/services.go
+++ b/node/modules/services.go
@@ -265,13 +265,9 @@ func RandomSchedule(lc fx.Lifecycle, mctx helpers.MetricsCtx, p RandomBeaconPara
return nil, err
}
- shd := beacon.Schedule{}
- for _, dc := range p.DrandConfig {
- bc, err := drand.NewDrandBeacon(gen.Timestamp, build.BlockDelaySecs, p.PubSub, dc.Config)
- if err != nil {
- return nil, xerrors.Errorf("creating drand beacon: %w", err)
- }
- shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
+ shd, err := drand.BeaconScheduleFromDrandSchedule(p.DrandConfig, gen.Timestamp, p.PubSub)
+ if err != nil {
+ return nil, xerrors.Errorf("failed to create beacon schedule: %w", err)
}
return shd, nil
diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go
index 74251e21d..29345d035 100644
--- a/node/modules/storageminer.go
+++ b/node/modules/storageminer.go
@@ -1000,7 +1000,6 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error
AvailableBalanceBuffer: types.FIL(cfg.AvailableBalanceBuffer),
DisableCollateralFallback: cfg.DisableCollateralFallback,
- BatchPreCommits: cfg.BatchPreCommits,
MaxPreCommitBatch: cfg.MaxPreCommitBatch,
PreCommitBatchWait: config.Duration(cfg.PreCommitBatchWait),
PreCommitBatchSlack: config.Duration(cfg.PreCommitBatchSlack),
@@ -1045,7 +1044,6 @@ func ToSealingConfig(dealmakingCfg config.DealmakingConfig, sealingCfg config.Se
AvailableBalanceBuffer: types.BigInt(sealingCfg.AvailableBalanceBuffer),
DisableCollateralFallback: sealingCfg.DisableCollateralFallback,
- BatchPreCommits: sealingCfg.BatchPreCommits,
MaxPreCommitBatch: sealingCfg.MaxPreCommitBatch,
PreCommitBatchWait: time.Duration(sealingCfg.PreCommitBatchWait),
PreCommitBatchSlack: time.Duration(sealingCfg.PreCommitBatchSlack),
diff --git a/paychmgr/paych_test.go b/paychmgr/paych_test.go
index 7de584784..e72a992a3 100644
--- a/paychmgr/paych_test.go
+++ b/paychmgr/paych_test.go
@@ -805,26 +805,6 @@ func createTestVoucher(t *testing.T, ch address.Address, voucherLane uint64, non
return sv
}
-func createTestVoucherWithExtra(t *testing.T, ch address.Address, voucherLane uint64, nonce uint64, voucherAmount big.Int, key []byte) *paychtypes.SignedVoucher { //nolint:deadcode
- sv := &paychtypes.SignedVoucher{
- ChannelAddr: ch,
- Lane: voucherLane,
- Nonce: nonce,
- Amount: voucherAmount,
- Extra: &paychtypes.ModVerifyParams{
- Actor: tutils.NewActorAddr(t, "act"),
- },
- }
-
- signingBytes, err := sv.SigningBytes()
- require.NoError(t, err)
- sig, err := sigs.Sign(crypto.SigTypeSecp256k1, key, signingBytes)
- require.NoError(t, err)
- sv.Signature = sig
-
- return sv
-}
-
type mockBestSpendableAPI struct {
mgr *Manager
}
diff --git a/scripts/snapshot-summary.py b/scripts/snapshot-summary.py
new file mode 100644
index 000000000..f37623cd2
--- /dev/null
+++ b/scripts/snapshot-summary.py
@@ -0,0 +1,30 @@
+import plotly.express as px
+import sys, json
+import pathlib
+
+snapshot_data = json.load(sys.stdin)
+
+# Possible extensions:
+# 1. parameterize to use block count as value instead of byte size
+# 2. parameterize on different types of px chart types
+# 3. parameterize on output port so we can serve this from infra
+
+parents = []
+names = []
+values = []
+
+for key in snapshot_data:
+ path = pathlib.Path(key)
+ name = key
+ parent = str(path.parent)
+ if key == '/':
+ parent = ''
+ stats = snapshot_data[key]
+ parents.append(parent)
+ names.append(name)
+ values.append(stats['Size'])
+
+data = dict(names=names, parents=parents, values=values)
+fig = px.treemap(data, names='names', parents='parents', values='values')
+fig.show()
+
diff --git a/storage/paths/fetch.go b/storage/paths/fetch.go
index 2d87380bd..6b87c0dd9 100644
--- a/storage/paths/fetch.go
+++ b/storage/paths/fetch.go
@@ -91,7 +91,7 @@ func FetchWithTemp(ctx context.Context, urls []string, dest string, header http.
continue
}
- if err := move(tempDest, dest); err != nil {
+ if err := Move(tempDest, dest); err != nil {
return "", xerrors.Errorf("fetch move error %s -> %s: %w", tempDest, dest, err)
}
diff --git a/storage/paths/index.go b/storage/paths/index.go
index 9192ec428..bc26bddb4 100644
--- a/storage/paths/index.go
+++ b/storage/paths/index.go
@@ -367,7 +367,7 @@ loop:
if !sid.primary && primary {
sid.primary = true
} else {
- log.Warnf("sector %v redeclared in %s", s, storageID)
+ log.Debugf("sector %v redeclared in %s", s, storageID)
}
continue loop
}
diff --git a/storage/paths/local.go b/storage/paths/local.go
index a866f5bbe..7d1be644a 100644
--- a/storage/paths/local.go
+++ b/storage/paths/local.go
@@ -720,7 +720,7 @@ func (st *Local) MoveStorage(ctx context.Context, s storiface.SectorRef, types s
return xerrors.Errorf("dropping source sector from index: %w", err)
}
- if err := move(storiface.PathByType(src, fileType), storiface.PathByType(dest, fileType)); err != nil {
+ if err := Move(storiface.PathByType(src, fileType), storiface.PathByType(dest, fileType)); err != nil {
// TODO: attempt some recovery (check if src is still there, re-declare)
return xerrors.Errorf("moving sector %v(%d): %w", s, fileType, err)
}
diff --git a/storage/paths/remote.go b/storage/paths/remote.go
index ab23e9789..0b7563bb2 100644
--- a/storage/paths/remote.go
+++ b/storage/paths/remote.go
@@ -249,7 +249,7 @@ func (r *Remote) acquireFromRemote(ctx context.Context, s abi.SectorID, fileType
continue
}
- if err := move(tempDest, dest); err != nil {
+ if err := Move(tempDest, dest); err != nil {
return "", xerrors.Errorf("fetch move error (storage %s) %s -> %s: %w", info.ID, tempDest, dest, err)
}
diff --git a/storage/paths/util_unix.go b/storage/paths/util_unix.go
index f691bad09..8796e601a 100644
--- a/storage/paths/util_unix.go
+++ b/storage/paths/util_unix.go
@@ -12,7 +12,7 @@ import (
"golang.org/x/xerrors"
)
-func move(from, to string) error {
+func Move(from, to string) error {
from, err := homedir.Expand(from)
if err != nil {
return xerrors.Errorf("move: expanding from: %w", err)
diff --git a/storage/pipeline/cbor_gen.go b/storage/pipeline/cbor_gen.go
index d14611c6a..57a668ae6 100644
--- a/storage/pipeline/cbor_gen.go
+++ b/storage/pipeline/cbor_gen.go
@@ -31,7 +31,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error {
cw := cbg.NewCborWriter(w)
- if _, err := cw.Write([]byte{184, 38}); err != nil {
+ if _, err := cw.Write([]byte{184, 39}); err != nil {
return err
}
@@ -565,6 +565,22 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error {
}
}
+ // t.PreCommit1Fails (uint64) (uint64)
+ if len("PreCommit1Fails") > cbg.MaxLength {
+ return xerrors.Errorf("Value in field \"PreCommit1Fails\" was too long")
+ }
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PreCommit1Fails"))); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(w, string("PreCommit1Fails")); err != nil {
+ return err
+ }
+
+ if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.PreCommit1Fails)); err != nil {
+ return err
+ }
+
// t.PreCommit2Fails (uint64) (uint64)
if len("PreCommit2Fails") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"PreCommit2Fails\" was too long")
@@ -1402,6 +1418,21 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) {
t.UpdateUnsealed = &c
}
+ }
+ // t.PreCommit1Fails (uint64) (uint64)
+ case "PreCommit1Fails":
+
+ {
+
+ maj, extra, err = cr.ReadHeader()
+ if err != nil {
+ return err
+ }
+ if maj != cbg.MajUnsignedInt {
+ return fmt.Errorf("wrong type for uint64 field")
+ }
+ t.PreCommit1Fails = uint64(extra)
+
}
// t.PreCommit2Fails (uint64) (uint64)
case "PreCommit2Fails":
diff --git a/storage/pipeline/commit_batch_test.go b/storage/pipeline/commit_batch_test.go
index 15c2100cb..5ae2f171a 100644
--- a/storage/pipeline/commit_batch_test.go
+++ b/storage/pipeline/commit_batch_test.go
@@ -53,7 +53,6 @@ func TestCommitBatcher(t *testing.T) {
WaitDealsDelay: time.Hour * 6,
AlwaysKeepUnsealedCopy: true,
- BatchPreCommits: true,
MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize,
PreCommitBatchWait: 24 * time.Hour,
PreCommitBatchSlack: 3 * time.Hour,
diff --git a/storage/pipeline/fsm.go b/storage/pipeline/fsm.go
index 8ae18a9fd..ac3dafa86 100644
--- a/storage/pipeline/fsm.go
+++ b/storage/pipeline/fsm.go
@@ -90,7 +90,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto
on(SectorOldTicket{}, GetTicket),
),
PreCommit2: planOne(
- on(SectorPreCommit2{}, PreCommitting),
+ on(SectorPreCommit2{}, SubmitPreCommitBatch),
on(SectorSealPreCommit2Failed{}, SealPreCommit2Failed),
on(SectorSealPreCommit1Failed{}, SealPreCommit1Failed),
),
diff --git a/storage/pipeline/fsm_events.go b/storage/pipeline/fsm_events.go
index 122691ca3..a798a884b 100644
--- a/storage/pipeline/fsm_events.go
+++ b/storage/pipeline/fsm_events.go
@@ -182,6 +182,8 @@ func (evt SectorSealPreCommit1Failed) FormatError(xerrors.Printer) (next error)
func (evt SectorSealPreCommit1Failed) apply(si *SectorInfo) {
si.InvalidProofs = 0 // reset counter
si.PreCommit2Fails = 0
+
+ si.PreCommit1Fails++
}
type SectorSealPreCommit2Failed struct{ error }
diff --git a/storage/pipeline/fsm_test.go b/storage/pipeline/fsm_test.go
index f12b66f93..7d7201953 100644
--- a/storage/pipeline/fsm_test.go
+++ b/storage/pipeline/fsm_test.go
@@ -1,14 +1,18 @@
package sealing
import (
+ "context"
"testing"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
+ "golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-statemachine"
+
+ "github.com/filecoin-project/lotus/storage/sealer/storiface"
)
func init() {
@@ -54,10 +58,10 @@ func TestHappyPath(t *testing.T) {
require.Equal(m.t, m.state.State, PreCommit2)
m.planSingle(SectorPreCommit2{})
- require.Equal(m.t, m.state.State, PreCommitting)
+ require.Equal(m.t, m.state.State, SubmitPreCommitBatch)
- m.planSingle(SectorPreCommitted{})
- require.Equal(m.t, m.state.State, PreCommitWait)
+ m.planSingle(SectorPreCommitBatchSent{})
+ require.Equal(m.t, m.state.State, PreCommitBatchWait)
m.planSingle(SectorPreCommitLanded{})
require.Equal(m.t, m.state.State, WaitSeed)
@@ -77,7 +81,7 @@ func TestHappyPath(t *testing.T) {
m.planSingle(SectorFinalized{})
require.Equal(m.t, m.state.State, Proving)
- expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector, Proving}
+ expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector, Proving}
for i, n := range notif {
if n.before.State != expected[i] {
t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State)
@@ -116,10 +120,10 @@ func TestHappyPathFinalizeEarly(t *testing.T) {
require.Equal(m.t, m.state.State, PreCommit2)
m.planSingle(SectorPreCommit2{})
- require.Equal(m.t, m.state.State, PreCommitting)
+ require.Equal(m.t, m.state.State, SubmitPreCommitBatch)
- m.planSingle(SectorPreCommitted{})
- require.Equal(m.t, m.state.State, PreCommitWait)
+ m.planSingle(SectorPreCommitBatchSent{})
+ require.Equal(m.t, m.state.State, PreCommitBatchWait)
m.planSingle(SectorPreCommitLanded{})
require.Equal(m.t, m.state.State, WaitSeed)
@@ -145,7 +149,7 @@ func TestHappyPathFinalizeEarly(t *testing.T) {
m.planSingle(SectorFinalized{})
require.Equal(m.t, m.state.State, Proving)
- expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, WaitSeed, Committing, CommitFinalize, SubmitCommit, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector, Proving}
+ expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, SubmitCommit, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector, Proving}
for i, n := range notif {
if n.before.State != expected[i] {
t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State)
@@ -220,10 +224,10 @@ func TestSeedRevert(t *testing.T) {
require.Equal(m.t, m.state.State, PreCommit2)
m.planSingle(SectorPreCommit2{})
- require.Equal(m.t, m.state.State, PreCommitting)
+ require.Equal(m.t, m.state.State, SubmitPreCommitBatch)
- m.planSingle(SectorPreCommitted{})
- require.Equal(m.t, m.state.State, PreCommitWait)
+ m.planSingle(SectorPreCommitBatchSent{})
+ require.Equal(m.t, m.state.State, PreCommitBatchWait)
m.planSingle(SectorPreCommitLanded{})
require.Equal(m.t, m.state.State, WaitSeed)
@@ -451,3 +455,24 @@ func TestCreationTimeCleared(t *testing.T) {
require.NotEqual(t, int64(0), m.state.CreationTime)
}
+
+func TestRetrySoftErr(t *testing.T) {
+ i := 0
+
+ tf := func() error {
+ i++
+ switch i {
+ case 1:
+ return storiface.Err(storiface.ErrTempAllocateSpace, xerrors.New("foo"))
+ case 2:
+ return nil
+ default:
+ t.Fatalf("what")
+ return xerrors.Errorf("this error didn't ever happen, and will never happen")
+ }
+ }
+
+ err := retrySoftErr(context.Background(), tf)
+ require.NoError(t, err)
+ require.Equal(t, 2, i)
+}
diff --git a/storage/pipeline/precommit_batch.go b/storage/pipeline/precommit_batch.go
index 63e263662..7e25d500b 100644
--- a/storage/pipeline/precommit_batch.go
+++ b/storage/pipeline/precommit_batch.go
@@ -7,7 +7,6 @@ import (
"sync"
"time"
- "github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
@@ -193,33 +192,30 @@ func (b *PreCommitBatcher) maybeStartBatch(notif bool) ([]sealiface.PreCommitBat
return nil, xerrors.Errorf("getting config: %w", err)
}
- if notif && total < cfg.MaxPreCommitBatch {
- return nil, nil
- }
-
ts, err := b.api.ChainHead(b.mctx)
if err != nil {
return nil, err
}
- // TODO: Drop this once nv14 has come and gone
+ curBasefeeLow := false
+ if !cfg.BatchPreCommitAboveBaseFee.Equals(big.Zero()) && ts.MinTicketBlock().ParentBaseFee.LessThan(cfg.BatchPreCommitAboveBaseFee) {
+ curBasefeeLow = true
+ }
+
+ // if this wasn't a user-forced batch, and we're not at/above the max batch size,
+ // and we're not above the basefee threshold, don't batch yet
+ if notif && total < cfg.MaxPreCommitBatch && !curBasefeeLow {
+ return nil, nil
+ }
+
nv, err := b.api.StateNetworkVersion(b.mctx, ts.Key())
if err != nil {
return nil, xerrors.Errorf("couldn't get network version: %w", err)
}
- individual := false
- if !cfg.BatchPreCommitAboveBaseFee.Equals(big.Zero()) && ts.MinTicketBlock().ParentBaseFee.LessThan(cfg.BatchPreCommitAboveBaseFee) && nv >= network.Version14 {
- individual = true
- }
-
- // todo support multiple batches
- var res []sealiface.PreCommitBatchRes
- if !individual {
- res, err = b.processBatch(cfg, ts.Key(), ts.MinTicketBlock().ParentBaseFee, nv)
- } else {
- res, err = b.processIndividually(cfg)
- }
+ // For precommits the only method to precommit sectors after nv21(22?) is to use the new precommit_batch2 method
+ // So we always batch
+ res, err := b.processBatch(cfg, ts.Key(), ts.MinTicketBlock().ParentBaseFee, nv)
if err != nil && len(res) == 0 {
return nil, err
}
@@ -243,91 +239,14 @@ func (b *PreCommitBatcher) maybeStartBatch(notif bool) ([]sealiface.PreCommitBat
return res, nil
}
-func (b *PreCommitBatcher) processIndividually(cfg sealiface.Config) ([]sealiface.PreCommitBatchRes, error) {
- mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, types.EmptyTSK)
- if err != nil {
- return nil, xerrors.Errorf("couldn't get miner info: %w", err)
- }
-
- avail := types.TotalFilecoinInt
-
- if cfg.CollateralFromMinerBalance && !cfg.DisableCollateralFallback {
- avail, err = b.api.StateMinerAvailableBalance(b.mctx, b.maddr, types.EmptyTSK)
- if err != nil {
- return nil, xerrors.Errorf("getting available miner balance: %w", err)
- }
-
- avail = big.Sub(avail, cfg.AvailableBalanceBuffer)
- if avail.LessThan(big.Zero()) {
- avail = big.Zero()
- }
- }
-
- var res []sealiface.PreCommitBatchRes
-
- for sn, info := range b.todo {
- r := sealiface.PreCommitBatchRes{
- Sectors: []abi.SectorNumber{sn},
- }
-
- mcid, err := b.processSingle(cfg, mi, &avail, info)
- if err != nil {
- r.Error = err.Error()
- } else {
- r.Msg = &mcid
- }
-
- res = append(res, r)
- }
-
- return res, nil
-}
-
-func (b *PreCommitBatcher) processSingle(cfg sealiface.Config, mi api.MinerInfo, avail *abi.TokenAmount, entry *preCommitEntry) (cid.Cid, error) {
- msgParams := infoToPreCommitSectorParams(entry.pci)
- enc := new(bytes.Buffer)
-
- if err := msgParams.MarshalCBOR(enc); err != nil {
- return cid.Undef, xerrors.Errorf("marshaling precommit params: %w", err)
- }
-
- deposit := entry.deposit
- if cfg.CollateralFromMinerBalance {
- c := big.Sub(deposit, *avail)
- *avail = big.Sub(*avail, deposit)
- deposit = c
-
- if deposit.LessThan(big.Zero()) {
- deposit = big.Zero()
- }
- if (*avail).LessThan(big.Zero()) {
- *avail = big.Zero()
- }
- }
-
- goodFunds := big.Add(deposit, big.Int(b.feeCfg.MaxPreCommitGasFee))
-
- from, _, err := b.addrSel.AddressFor(b.mctx, b.api, mi, api.PreCommitAddr, goodFunds, deposit)
- if err != nil {
- return cid.Undef, xerrors.Errorf("no good address to send precommit message from: %w", err)
- }
-
- mcid, err := sendMsg(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.PreCommitSector, deposit, big.Int(b.feeCfg.MaxPreCommitGasFee), enc.Bytes())
- if err != nil {
- return cid.Undef, xerrors.Errorf("pushing message to mpool: %w", err)
- }
-
- return mcid, nil
-}
-
func (b *PreCommitBatcher) processPreCommitBatch(cfg sealiface.Config, bf abi.TokenAmount, entries []*preCommitEntry, nv network.Version) ([]sealiface.PreCommitBatchRes, error) {
- params := miner.PreCommitSectorBatchParams{}
+ params := miner.PreCommitSectorBatchParams2{}
deposit := big.Zero()
var res sealiface.PreCommitBatchRes
for _, p := range entries {
res.Sectors = append(res.Sectors, p.pci.SectorNumber)
- params.Sectors = append(params.Sectors, *infoToPreCommitSectorParams(p.pci))
+ params.Sectors = append(params.Sectors, *p.pci)
deposit = big.Add(deposit, p.deposit)
}
@@ -367,7 +286,7 @@ func (b *PreCommitBatcher) processPreCommitBatch(cfg sealiface.Config, bf abi.To
return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("no good address found: %w", err)
}
- _, err = simulateMsgGas(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.PreCommitSectorBatch, needFunds, maxFee, enc.Bytes())
+ _, err = simulateMsgGas(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.PreCommitSectorBatch2, needFunds, maxFee, enc.Bytes())
if err != nil && (!api.ErrorIsIn(err, []error{&api.ErrOutOfGas{}}) || len(entries) == 1) {
res.Error = err.Error()
@@ -385,7 +304,7 @@ func (b *PreCommitBatcher) processPreCommitBatch(cfg sealiface.Config, bf abi.To
}
// If state call succeeds, we can send the message for real
- mcid, err := sendMsg(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.PreCommitSectorBatch, needFunds, maxFee, enc.Bytes())
+ mcid, err := sendMsg(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.PreCommitSectorBatch2, needFunds, maxFee, enc.Bytes())
if err != nil {
res.Error = err.Error()
return []sealiface.PreCommitBatchRes{res}, xerrors.Errorf("pushing message to mpool: %w", err)
diff --git a/storage/pipeline/precommit_batch_test.go b/storage/pipeline/precommit_batch_test.go
index 6951faad7..1f3aaf244 100644
--- a/storage/pipeline/precommit_batch_test.go
+++ b/storage/pipeline/precommit_batch_test.go
@@ -56,7 +56,6 @@ func TestPrecommitBatcher(t *testing.T) {
WaitDealsDelay: time.Hour * 6,
AlwaysKeepUnsealedCopy: true,
- BatchPreCommits: true,
MaxPreCommitBatch: maxBatch,
PreCommitBatchWait: 24 * time.Hour,
PreCommitBatchSlack: 3 * time.Hour,
@@ -114,7 +113,7 @@ func TestPrecommitBatcher(t *testing.T) {
basefee = big.NewInt(10001)
}
- s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil)
+ s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil).MaxTimes(2) // once in AddPreCommit
go func() {
defer done.Unlock()
@@ -183,28 +182,6 @@ func TestPrecommitBatcher(t *testing.T) {
expectInitialCalls := func() action {
return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise {
s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, big.NewInt(10001), 1), nil)
- s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version14, nil)
- return nil
- }
- }
-
- //stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001
- expectSendsSingle := func(expect []abi.SectorNumber) action {
- return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise {
- s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, big.NewInt(9999), 1), nil)
- s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version14, nil)
-
- s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(api.MinerInfo{Owner: t0123, Worker: t0123}, nil)
- for _, number := range expect {
- numClone := number
- s.EXPECT().MpoolPushMessage(gomock.Any(), funMatcher(func(i interface{}) bool {
- b := i.(*types.Message)
- var params miner6.PreCommitSectorParams
- require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b.Params)))
- require.Equal(t, numClone, params.SectorNumber)
- return true
- }), gomock.Any()).Return(dummySmsg, nil)
- }
return nil
}
}
@@ -240,18 +217,11 @@ func TestPrecommitBatcher(t *testing.T) {
}{
"addSingle": {
actions: []action{
- addSector(0, false),
+ addSector(0, true),
waitPending(1),
flush([]abi.SectorNumber{0}),
},
},
- "addTwo": {
- actions: []action{
- addSectors(getSectors(2), false),
- waitPending(2),
- flush(getSectors(2)),
- },
- },
"addMax": {
actions: []action{
expectInitialCalls(),
@@ -268,10 +238,10 @@ func TestPrecommitBatcher(t *testing.T) {
addSectors(getSectors(maxBatch), true),
},
},
- "addMax-belowBaseFee": {
+ "addOne-belowBaseFee": {
actions: []action{
- expectSendsSingle(getSectors(maxBatch)),
- addSectors(getSectors(maxBatch), false),
+ expectSend(getSectors(1), false),
+ addSectors(getSectors(1), false),
},
},
}
@@ -287,6 +257,7 @@ func TestPrecommitBatcher(t *testing.T) {
// create them mocks
pcapi := mocks.NewMockPreCommitBatcherApi(mockCtrl)
+ pcapi.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version20, nil).AnyTimes()
pcb := pipeline.NewPreCommitBatcher(ctx, t0123, pcapi, as, fc, cfg)
diff --git a/storage/pipeline/receive.go b/storage/pipeline/receive.go
index b1b0ce99f..8427eba54 100644
--- a/storage/pipeline/receive.go
+++ b/storage/pipeline/receive.go
@@ -123,7 +123,7 @@ func (m *Sealing) checkSectorMeta(ctx context.Context, meta api.RemoteSectorMeta
if err := m.maddr.MarshalCBOR(maddrBuf); err != nil {
return SectorInfo{}, xerrors.Errorf("marshal miner address for seed check: %w", err)
}
- rand, err := m.Api.StateGetRandomnessFromTickets(ctx, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, meta.SeedEpoch, maddrBuf.Bytes(), ts.Key())
+ rand, err := m.Api.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, meta.SeedEpoch, maddrBuf.Bytes(), ts.Key())
if err != nil {
return SectorInfo{}, xerrors.Errorf("generating check seed: %w", err)
}
diff --git a/storage/pipeline/sealiface/config.go b/storage/pipeline/sealiface/config.go
index dbdb91d54..99715fc28 100644
--- a/storage/pipeline/sealiface/config.go
+++ b/storage/pipeline/sealiface/config.go
@@ -42,7 +42,6 @@ type Config struct {
AvailableBalanceBuffer abi.TokenAmount
DisableCollateralFallback bool
- BatchPreCommits bool
MaxPreCommitBatch int
PreCommitBatchWait time.Duration
PreCommitBatchSlack time.Duration
diff --git a/storage/pipeline/sector_state.go b/storage/pipeline/sector_state.go
index 84c08f43b..e1f5bfd69 100644
--- a/storage/pipeline/sector_state.go
+++ b/storage/pipeline/sector_state.go
@@ -82,7 +82,7 @@ const (
PreCommit1 SectorState = "PreCommit1" // do PreCommit1
PreCommit2 SectorState = "PreCommit2" // do PreCommit2
- PreCommitting SectorState = "PreCommitting" // on chain pre-commit
+ PreCommitting SectorState = "PreCommitting" // on chain pre-commit (deprecated)
PreCommitWait SectorState = "PreCommitWait" // waiting for precommit to land on chain
SubmitPreCommitBatch SectorState = "SubmitPreCommitBatch"
diff --git a/storage/pipeline/states_failed.go b/storage/pipeline/states_failed.go
index d952d8eda..203f14910 100644
--- a/storage/pipeline/states_failed.go
+++ b/storage/pipeline/states_failed.go
@@ -54,7 +54,13 @@ func (m *Sealing) checkPreCommitted(ctx statemachine.Context, sector SectorInfo)
return info, true
}
+var MaxPreCommit1Retries = uint64(3)
+
func (m *Sealing) handleSealPrecommit1Failed(ctx statemachine.Context, sector SectorInfo) error {
+ if sector.PreCommit1Fails > MaxPreCommit1Retries {
+ return ctx.Send(SectorRemove{})
+ }
+
if err := failedCooldown(ctx, sector); err != nil {
return err
}
@@ -301,8 +307,21 @@ func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo
switch mw.Receipt.ExitCode {
case exitcode.Ok:
- // API error in CcommitWait
- return ctx.Send(SectorRetryCommitWait{})
+ si, err := m.Api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, mw.TipSet)
+ if err != nil {
+ // API error
+ if err := failedCooldown(ctx, sector); err != nil {
+ return err
+ }
+
+ return ctx.Send(SectorRetryCommitWait{})
+ }
+ if si != nil {
+ // API error in CommitWait?
+ return ctx.Send(SectorRetryCommitWait{})
+ }
+ // if si == nil, something else went wrong; Likely expired deals, we'll
+ // find out in checkCommit
case exitcode.SysErrOutOfGas:
// API error in CommitWait AND gas estimator guessed a wrong number in SubmitCommit
return ctx.Send(SectorRetrySubmitCommit{})
diff --git a/storage/pipeline/states_sealing.go b/storage/pipeline/states_sealing.go
index 0608ead07..48d024f85 100644
--- a/storage/pipeline/states_sealing.go
+++ b/storage/pipeline/states_sealing.go
@@ -4,8 +4,10 @@ import (
"bytes"
"context"
"encoding/json"
+ "errors"
"io"
"net/http"
+ "time"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
@@ -213,6 +215,41 @@ func (m *Sealing) handleGetTicket(ctx statemachine.Context, sector SectorInfo) e
})
}
+var SoftErrRetryWait = 5 * time.Second
+
+func retrySoftErr(ctx context.Context, cb func() error) error {
+ for {
+ err := cb()
+ if err == nil {
+ return nil
+ }
+
+ var cerr storiface.WorkError
+
+ if errors.As(err, &cerr) {
+ switch cerr.ErrCode() {
+ case storiface.ErrTempWorkerRestart:
+ fallthrough
+ case storiface.ErrTempAllocateSpace:
+ // retry
+ default:
+ // non-temp error
+ return err
+ }
+
+ // check if the context got cancelled early
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ // retry
+ time.Sleep(SoftErrRetryWait)
+ } else {
+ return err
+ }
+ }
+}
+
func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo) error {
if err := checkPieces(ctx.Context(), m.maddr, sector.SectorNumber, sector.Pieces, m.Api, false); err != nil { // Sanity check state
switch err.(type) {
@@ -269,7 +306,11 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo)
}
}
- pc1o, err := m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.pieceInfos())
+ var pc1o storiface.PreCommit1Out
+ err = retrySoftErr(ctx.Context(), func() (err error) {
+ pc1o, err = m.sealer.SealPreCommit1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.TicketValue, sector.pieceInfos())
+ return err
+ })
if err != nil {
return ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("seal pre commit(1) failed: %w", err)})
}
@@ -280,7 +321,12 @@ func (m *Sealing) handlePreCommit1(ctx statemachine.Context, sector SectorInfo)
}
func (m *Sealing) handlePreCommit2(ctx statemachine.Context, sector SectorInfo) error {
- cids, err := m.sealer.SealPreCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.PreCommit1Out)
+ var cids storiface.SectorCids
+
+ err := retrySoftErr(ctx.Context(), func() (err error) {
+ cids, err = m.sealer.SealPreCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.PreCommit1Out)
+ return err
+ })
if err != nil {
return ctx.Send(SectorSealPreCommit2Failed{xerrors.Errorf("seal pre commit(2) failed: %w", err)})
}
@@ -368,6 +414,10 @@ func (m *Sealing) preCommitInfo(ctx statemachine.Context, sector SectorInfo) (*m
DealIDs: sector.dealIDs(),
}
+ if sector.hasDeals() {
+ params.UnsealedCid = sector.CommD
+ }
+
collateral, err := m.Api.StateMinerPreCommitDepositForPower(ctx.Context(), m.maddr, *params, ts.Key())
if err != nil {
return nil, big.Zero(), types.EmptyTSK, xerrors.Errorf("getting initial pledge collateral: %w", err)
@@ -377,62 +427,10 @@ func (m *Sealing) preCommitInfo(ctx statemachine.Context, sector SectorInfo) (*m
}
func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInfo) error {
- cfg, err := m.getConfig()
- if err != nil {
- return xerrors.Errorf("getting config: %w", err)
- }
-
- if cfg.BatchPreCommits {
- nv, err := m.Api.StateNetworkVersion(ctx.Context(), types.EmptyTSK)
- if err != nil {
- return xerrors.Errorf("getting network version: %w", err)
- }
-
- if nv >= network.Version13 {
- return ctx.Send(SectorPreCommitBatch{})
- }
- }
-
- info, pcd, tsk, err := m.preCommitInfo(ctx, sector)
- if err != nil {
- return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("preCommitInfo: %w", err)})
- }
- if info == nil {
- return nil // event was sent in preCommitInfo
- }
-
- params := infoToPreCommitSectorParams(info)
-
- deposit, err := collateralSendAmount(ctx.Context(), m.Api, m.maddr, cfg, pcd)
- if err != nil {
- return err
- }
-
- enc := new(bytes.Buffer)
- if err := params.MarshalCBOR(enc); err != nil {
- return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("could not serialize pre-commit sector parameters: %w", err)})
- }
-
- mi, err := m.Api.StateMinerInfo(ctx.Context(), m.maddr, tsk)
- if err != nil {
- log.Errorf("handlePreCommitting: api error, not proceeding: %+v", err)
- return nil
- }
-
- goodFunds := big.Add(deposit, big.Int(m.feeCfg.MaxPreCommitGasFee))
-
- from, _, err := m.addrSel.AddressFor(ctx.Context(), m.Api, mi, api.PreCommitAddr, goodFunds, deposit)
- if err != nil {
- return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("no good address to send precommit message from: %w", err)})
- }
-
- log.Infof("submitting precommit for sector %d (deposit: %s): ", sector.SectorNumber, deposit)
- mcid, err := sendMsg(ctx.Context(), m.Api, from, m.maddr, builtin.MethodsMiner.PreCommitSector, deposit, big.Int(m.feeCfg.MaxPreCommitGasFee), enc.Bytes())
- if err != nil {
- return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)})
- }
-
- return ctx.Send(SectorPreCommitted{Message: mcid, PreCommitDeposit: pcd, PreCommitInfo: *info})
+ // note: this is a legacy state handler, normally new sectors won't enter this state
+ // but we keep this handler in order to not break existing sector state machines.
+ // todo: drop after nv21
+ return ctx.Send(SectorPreCommitBatch{})
}
func (m *Sealing) handleSubmitPreCommitBatch(ctx statemachine.Context, sector SectorInfo) error {
diff --git a/storage/pipeline/types.go b/storage/pipeline/types.go
index 6329b5666..e752eb2b9 100644
--- a/storage/pipeline/types.go
+++ b/storage/pipeline/types.go
@@ -56,6 +56,8 @@ type SectorInfo struct {
TicketEpoch abi.ChainEpoch
PreCommit1Out storiface.PreCommit1Out
+ PreCommit1Fails uint64
+
// PreCommit2
CommD *cid.Cid
CommR *cid.Cid // SectorKey
diff --git a/storage/pipeline/utils.go b/storage/pipeline/utils.go
index ce4283b6c..4b99a5bea 100644
--- a/storage/pipeline/utils.go
+++ b/storage/pipeline/utils.go
@@ -10,7 +10,6 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
- "github.com/filecoin-project/go-state-types/builtin/v9/miner"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
@@ -127,14 +126,3 @@ func sendMsg(ctx context.Context, sa interface {
return smsg.Cid(), nil
}
-
-func infoToPreCommitSectorParams(info *miner.SectorPreCommitInfo) *miner.PreCommitSectorParams {
- return &miner.PreCommitSectorParams{
- SealProof: info.SealProof,
- SectorNumber: info.SectorNumber,
- SealedCID: info.SealedCID,
- SealRandEpoch: info.SealRandEpoch,
- DealIDs: info.DealIDs,
- Expiration: info.Expiration,
- }
-}
diff --git a/storage/sealer/ffiwrapper/basicfs/fs.go b/storage/sealer/ffiwrapper/basicfs/fs.go
index 7a9f70d59..4fd8e271f 100644
--- a/storage/sealer/ffiwrapper/basicfs/fs.go
+++ b/storage/sealer/ffiwrapper/basicfs/fs.go
@@ -89,3 +89,7 @@ func (b *Provider) AcquireSector(ctx context.Context, id storiface.SectorRef, ex
return out, done, nil
}
+
+func (b *Provider) AcquireSectorCopy(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
+ return b.AcquireSector(ctx, id, existing, allocate, ptype)
+}
diff --git a/storage/sealer/ffiwrapper/sealer_cgo.go b/storage/sealer/ffiwrapper/sealer_cgo.go
index 871012d0b..b11dec9ce 100644
--- a/storage/sealer/ffiwrapper/sealer_cgo.go
+++ b/storage/sealer/ffiwrapper/sealer_cgo.go
@@ -10,6 +10,7 @@ import (
"crypto/rand"
"encoding/base64"
"encoding/json"
+ "errors"
"io"
"math/bits"
"os"
@@ -31,9 +32,9 @@ import (
"github.com/filecoin-project/lotus/lib/nullreader"
spaths "github.com/filecoin-project/lotus/storage/paths"
- nr "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader"
"github.com/filecoin-project/lotus/storage/sealer/fr32"
"github.com/filecoin-project/lotus/storage/sealer/partialfile"
+ "github.com/filecoin-project/lotus/storage/sealer/proofpaths"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
@@ -192,7 +193,7 @@ func (sb *Sealer) AddPiece(ctx context.Context, sector storiface.SectorRef, exis
defer func() {
closer, ok := origPieceData.(io.Closer)
if !ok {
- log.Warnf("AddPiece: cannot close pieceData reader %T because it is not an io.Closer", origPieceData)
+ log.Debugf("AddPiece: cannot close pieceData reader %T because it is not an io.Closer", origPieceData)
return
}
if err := closer.Close(); err != nil {
@@ -403,92 +404,190 @@ func (sb *Sealer) pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, err
return pieceCID, werr()
}
-func (sb *Sealer) tryDecodeUpdatedReplica(ctx context.Context, sector storiface.SectorRef, commD cid.Cid, unsealedPath string, randomness abi.SealRandomness) (bool, error) {
- replicaPath, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathSealing)
+func (sb *Sealer) acquireUpdatePath(ctx context.Context, sector storiface.SectorRef) (string, func(), error) {
+ // copy so that the sector doesn't get removed from a long-term storage path
+ replicaPath, releaseSector, err := sb.sectors.AcquireSectorCopy(ctx, sector, storiface.FTUpdate, storiface.FTNone, storiface.PathSealing)
if xerrors.Is(err, storiface.ErrSectorNotFound) {
- return false, nil
+ return "", releaseSector, nil
} else if err != nil {
- return false, xerrors.Errorf("reading updated replica: %w", err)
+ return "", releaseSector, xerrors.Errorf("reading updated replica: %w", err)
}
- defer done()
- sealedPaths, done2, err := sb.AcquireSectorKeyOrRegenerate(ctx, sector, randomness)
+ return replicaPath.Update, releaseSector, nil
+}
+
+func (sb *Sealer) decodeUpdatedReplica(ctx context.Context, sector storiface.SectorRef, commD cid.Cid, updatePath, unsealedPath string, randomness abi.SealRandomness) error {
+ keyPaths, done2, err := sb.acquireSectorKeyOrRegenerate(ctx, sector, randomness)
if err != nil {
- return false, xerrors.Errorf("acquiring sealed sector: %w", err)
+ return xerrors.Errorf("acquiring sealed sector: %w", err)
}
defer done2()
// Sector data stored in replica update
updateProof, err := sector.ProofType.RegisteredUpdateProof()
if err != nil {
- return false, err
+ return err
}
- return true, ffi.SectorUpdate.DecodeFrom(updateProof, unsealedPath, replicaPath.Update, sealedPaths.Sealed, sealedPaths.Cache, commD)
-}
-
-func (sb *Sealer) AcquireSectorKeyOrRegenerate(ctx context.Context, sector storiface.SectorRef, randomness abi.SealRandomness) (storiface.SectorPaths, func(), error) {
- paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage)
- if err == nil {
- return paths, done, err
- } else if !xerrors.Is(err, storiface.ErrSectorNotFound) {
- return paths, done, xerrors.Errorf("reading sector key: %w", err)
+ if err := ffi.SectorUpdate.DecodeFrom(updateProof, unsealedPath, updatePath, keyPaths.Sealed, keyPaths.Cache, commD); err != nil {
+ return xerrors.Errorf("decoding unsealed sector data: %w", err)
}
- // Sector key can't be found, so let's regenerate it
- sectorSize, err := sector.ProofType.SectorSize()
- if err != nil {
- return paths, done, xerrors.Errorf("retrieving sector size: %w", err)
- }
- paddedSize := abi.PaddedPieceSize(sectorSize)
-
- _, err = sb.AddPiece(ctx, sector, nil, paddedSize.Unpadded(), nr.NewNullReader(paddedSize.Unpadded()))
- if err != nil {
- return paths, done, xerrors.Errorf("recomputing empty data: %w", err)
- }
-
- err = sb.RegenerateSectorKey(ctx, sector, randomness, []abi.PieceInfo{{PieceCID: zerocomm.ZeroPieceCommitment(paddedSize.Unpadded()), Size: paddedSize}})
- if err != nil {
- return paths, done, xerrors.Errorf("during pc1: %w", err)
- }
-
- // Sector key should exist now, let's grab the paths
- return sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage)
-}
-
-func (sb *Sealer) UnsealPiece(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
ssize, err := sector.ProofType.SectorSize()
if err != nil {
return err
}
maxPieceSize := abi.PaddedPieceSize(ssize)
- // try finding existing
+ pf, err := partialfile.OpenPartialFile(maxPieceSize, unsealedPath)
+ if err != nil {
+ return xerrors.Errorf("opening partial file: %w", err)
+ }
+
+ if err := pf.MarkAllocated(0, maxPieceSize); err != nil {
+ return xerrors.Errorf("marking range allocated: %w", err)
+ }
+
+ if err := pf.Close(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (sb *Sealer) acquireSectorKeyOrRegenerate(ctx context.Context, sector storiface.SectorRef, randomness abi.SealRandomness) (storiface.SectorPaths, func(), error) {
+ // copy so that the files aren't removed from long-term storage
+ paths, done, err := sb.sectors.AcquireSectorCopy(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathSealing)
+ if err == nil {
+ return paths, done, err
+ } else if !xerrors.Is(err, storiface.ErrSectorNotFound) {
+ return paths, done, xerrors.Errorf("reading sector key: %w", err)
+ }
+
+ sectorSize, err := sector.ProofType.SectorSize()
+ if err != nil {
+ return storiface.SectorPaths{}, nil, xerrors.Errorf("retrieving sector size: %w", err)
+ }
+
+ err = sb.regenerateSectorKey(ctx, sector, randomness, zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(sectorSize).Unpadded()))
+ if err != nil {
+ return storiface.SectorPaths{}, nil, xerrors.Errorf("regenerating sector key: %w", err)
+ }
+
+ // Sector key should exist now, let's grab the paths
+ return sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathSealing)
+}
+
+func (sb *Sealer) regenerateSectorKey(ctx context.Context, sector storiface.SectorRef, ticket abi.SealRandomness, keyDataCid cid.Cid) error {
+ paths, releaseSector, err := sb.sectors.AcquireSectorCopy(ctx, sector, storiface.FTCache, storiface.FTSealed, storiface.PathSealing)
+ if err != nil {
+ return xerrors.Errorf("acquiring sector paths: %w", err)
+ }
+ defer releaseSector()
+
+ // stat paths.Sealed, make sure it doesn't exist
+ _, err = os.Stat(paths.Sealed)
+ if err == nil {
+ return xerrors.Errorf("sealed file exists before regenerating sector key")
+ }
+ if !os.IsNotExist(err) {
+ return xerrors.Errorf("stat sealed path: %w", err)
+ }
+
+ // prepare SDR params
+ commp, err := commcid.CIDToDataCommitmentV1(keyDataCid)
+ if err != nil {
+ return xerrors.Errorf("computing commP: %w", err)
+ }
+
+ replicaID, err := sector.ProofType.ReplicaId(sector.ID.Miner, sector.ID.Number, ticket, commp)
+ if err != nil {
+ return xerrors.Errorf("computing replica id: %w", err)
+ }
+
+ // generate new sector key
+ err = ffi.GenerateSDR(
+ sector.ProofType,
+ paths.Cache,
+ replicaID,
+ )
+ if err != nil {
+ return xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err)
+ }
+
+ // move the last layer (sector key) to the sealed location
+ layerCount, err := proofpaths.SDRLayers(sector.ProofType)
+ if err != nil {
+ return xerrors.Errorf("getting SDR layer count: %w", err)
+ }
+
+ lastLayer := filepath.Join(paths.Cache, proofpaths.LayerFileName(layerCount))
+
+ sealedInCache := filepath.Join(paths.Cache, filepath.Base(paths.Sealed))
+ // rename last layer to sealed sector file name in the cache dir, which is
+ // almost guaranteed to happen on one filesystem
+ err = os.Rename(lastLayer, sealedInCache)
+ if err != nil {
+ return xerrors.Errorf("renaming last layer: %w", err)
+ }
+
+ err = spaths.Move(sealedInCache, paths.Sealed)
+ if err != nil {
+ return xerrors.Errorf("moving sector key: %w", err)
+ }
+
+ // remove other layer files
+ for i := 1; i < layerCount; i++ {
+ err = os.Remove(filepath.Join(paths.Cache, proofpaths.LayerFileName(i)))
+ if err != nil {
+ return xerrors.Errorf("removing layer file %d: %w", i, err)
+ }
+ }
+
+ return nil
+}
+
+func (sb *Sealer) UnsealPiece(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error {
+ // NOTE: This function will copy sealed/unsealed (and possibly update) files
+ // into sealing storage. Those copies get cleaned up in LocalWorker.UnsealPiece
+ // after this call exits. The resulting unsealed file is going to be moved to
+ // long-term storage as well.
+
+ ssize, err := sector.ProofType.SectorSize()
+ if err != nil {
+ return err
+ }
+ maxPieceSize := abi.PaddedPieceSize(ssize)
+
+ // try finding existing (also move to a sealing path if it's not here)
unsealedPath, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTNone, storiface.PathSealing)
var pf *partialfile.PartialFile
switch {
case xerrors.Is(err, storiface.ErrSectorNotFound):
+ // allocate if doesn't exist
unsealedPath, done, err = sb.sectors.AcquireSector(ctx, sector, storiface.FTNone, storiface.FTUnsealed, storiface.PathSealing)
if err != nil {
return xerrors.Errorf("acquire unsealed sector path (allocate): %w", err)
}
- defer done()
-
- pf, err = partialfile.CreatePartialFile(maxPieceSize, unsealedPath.Unsealed)
- if err != nil {
- return xerrors.Errorf("create unsealed file: %w", err)
- }
-
case err == nil:
- defer done()
-
- pf, err = partialfile.OpenPartialFile(maxPieceSize, unsealedPath.Unsealed)
- if err != nil {
- return xerrors.Errorf("opening partial file: %w", err)
- }
+ // no-op
default:
return xerrors.Errorf("acquire unsealed sector path (existing): %w", err)
}
+
+ defer done()
+
+ pf, err = partialfile.OpenPartialFile(maxPieceSize, unsealedPath.Unsealed)
+ if err != nil {
+ if errors.Is(err, os.ErrNotExist) {
+ pf, err = partialfile.CreatePartialFile(maxPieceSize, unsealedPath.Unsealed)
+ if err != nil {
+ return xerrors.Errorf("creating partial file: %w", err)
+ }
+ } else {
+ return xerrors.Errorf("opening partial file: %w", err)
+ }
+ }
defer pf.Close() // nolint
allocated, err := pf.Allocated()
@@ -496,6 +595,8 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector storiface.SectorRef, o
return xerrors.Errorf("getting bitruns of allocated data: %w", err)
}
+ // figure out if there's anything that needs to be unsealed
+
toUnseal, err := computeUnsealRanges(allocated, offset, size)
if err != nil {
return xerrors.Errorf("computing unseal ranges: %w", err)
@@ -505,21 +606,36 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector storiface.SectorRef, o
return nil
}
+ // need to unseal
+
// If piece data stored in updated replica decode whole sector
- decoded, err := sb.tryDecodeUpdatedReplica(ctx, sector, commd, unsealedPath.Unsealed, randomness)
+ upd, updDone, err := sb.acquireUpdatePath(ctx, sector)
if err != nil {
- return xerrors.Errorf("decoding sector from replica: %w", err)
- }
- if decoded {
- return pf.MarkAllocated(0, maxPieceSize)
+ return xerrors.Errorf("acquiring update path: %w", err)
}
- // Piece data sealed in sector
- srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache|storiface.FTSealed, storiface.FTNone, storiface.PathSealing)
+ if upd != "" {
+ defer updDone()
+
+ // decodeUpdatedReplica will modify the unsealed file
+ if err := pf.Close(); err != nil {
+ return err
+ }
+
+ err := sb.decodeUpdatedReplica(ctx, sector, commd, upd, unsealedPath.Unsealed, randomness)
+ if err != nil {
+ return xerrors.Errorf("decoding sector from replica: %w", err)
+ }
+ return nil
+ }
+
+ // Piece data non-upgrade sealed in sector
+ // (copy so that files stay in long-term storage)
+ srcPaths, releaseSector, err := sb.sectors.AcquireSectorCopy(ctx, sector, storiface.FTCache|storiface.FTSealed, storiface.FTNone, storiface.PathSealing)
if err != nil {
return xerrors.Errorf("acquire sealed sector paths: %w", err)
}
- defer srcDone()
+ defer releaseSector()
sealed, err := os.OpenFile(srcPaths.Sealed, os.O_RDONLY, 0644) // nolint:gosec
if err != nil {
@@ -687,51 +803,6 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector storif
return true, nil
}
-func (sb *Sealer) RegenerateSectorKey(ctx context.Context, sector storiface.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) error {
- paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed|storiface.FTCache, storiface.FTSealed, storiface.PathSealing)
- if err != nil {
- return xerrors.Errorf("acquiring sector paths: %w", err)
- }
- defer done()
-
- e, err := os.OpenFile(paths.Sealed, os.O_RDWR|os.O_CREATE, 0644) // nolint:gosec
- if err != nil {
- return xerrors.Errorf("ensuring sealed file exists: %w", err)
- }
- if err := e.Close(); err != nil {
- return err
- }
-
- var sum abi.UnpaddedPieceSize
- for _, piece := range pieces {
- sum += piece.Size.Unpadded()
- }
- ssize, err := sector.ProofType.SectorSize()
- if err != nil {
- return err
- }
- ussize := abi.PaddedPieceSize(ssize).Unpadded()
- if sum != ussize {
- return xerrors.Errorf("aggregated piece sizes don't match sector size: %d != %d (%d)", sum, ussize, int64(ussize-sum))
- }
-
- // TODO: context cancellation respect
- _, err = ffi.SealPreCommitPhase1(
- sector.ProofType,
- paths.Cache,
- paths.Unsealed,
- paths.Sealed,
- sector.ID.Number,
- sector.ID.Miner,
- ticket,
- pieces,
- )
- if err != nil {
- return xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err)
- }
- return nil
-}
-
func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storiface.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storiface.PreCommit1Out, err error) {
paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed, storiface.FTSealed|storiface.FTCache, storiface.PathSealing)
if err != nil {
diff --git a/storage/sealer/ffiwrapper/sealer_test.go b/storage/sealer/ffiwrapper/sealer_test.go
index 4d3b1a9be..78c0ffb06 100644
--- a/storage/sealer/ffiwrapper/sealer_test.go
+++ b/storage/sealer/ffiwrapper/sealer_test.go
@@ -5,6 +5,7 @@ import (
"context"
"fmt"
"io"
+ "io/fs"
"math/rand"
"os"
"path/filepath"
@@ -22,6 +23,7 @@ import (
ffi "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/filecoin-ffi/cgo"
commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
+ commcid "github.com/filecoin-project/go-fil-commcid"
"github.com/filecoin-project/go-paramfetch"
"github.com/filecoin-project/go-state-types/abi"
prooftypes "github.com/filecoin-project/go-state-types/proof"
@@ -412,7 +414,18 @@ func TestSealPoStNoCommit(t *testing.T) {
fmt.Printf("EPoSt: %s\n", epost.Sub(precommit).String())
}
+func TestMain(m *testing.M) {
+ //setup()
+ // Here it no-longer is bound to 30s but has 1m30s for the whole suite.
+ getGrothParamFileAndVerifyingKeys(sectorSize)
+
+ code := m.Run()
+ //shutdown()
+ os.Exit(code)
+}
+
func TestSealAndVerify3(t *testing.T) {
+ t.Skip("i flake on CI, re-enable me when you have a fix pls")
if testing.Short() {
t.Skip("skipping test in short mode")
}
@@ -424,8 +437,6 @@ func TestSealAndVerify3(t *testing.T) {
}
_ = os.Setenv("RUST_LOG", "trace")
- getGrothParamFileAndVerifyingKeys(sectorSize)
-
dir, err := os.MkdirTemp("", "sbtest")
if err != nil {
t.Fatal(err)
@@ -595,12 +606,18 @@ func BenchmarkWriteWithAlignment(b *testing.B) {
}
func openFDs(t *testing.T) int {
- dent, err := os.ReadDir("/proc/self/fd")
- require.NoError(t, err)
+ path := "/proc/self/fd"
+ if runtime.GOOS == "darwin" {
+ path = "/dev/fd"
+ }
+ dent, err := os.ReadDir(path)
+ if err != nil && !strings.Contains(err.Error(), "/dev/fd/3: bad file descriptor") {
+ require.NoError(t, err)
+ }
var skip int
for _, info := range dent {
- l, err := os.Readlink(filepath.Join("/proc/self/fd", info.Name()))
+ l, err := os.Readlink(filepath.Join(path, info.Name()))
if err != nil {
continue
}
@@ -621,11 +638,15 @@ func requireFDsClosed(t *testing.T, start int) {
openNow := openFDs(t)
if start != openNow {
- dent, err := os.ReadDir("/proc/self/fd")
+ path := "/proc/self/fd"
+ if runtime.GOOS == "darwin" {
+ path = "/dev/fd"
+ }
+ dent, err := os.ReadDir(path)
require.NoError(t, err)
for _, info := range dent {
- l, err := os.Readlink(filepath.Join("/proc/self/fd", info.Name()))
+ l, err := os.Readlink(filepath.Join(path, info.Name()))
if err != nil {
fmt.Printf("FD err %s\n", err)
continue
@@ -1085,3 +1106,66 @@ func (c *closeAssertReader) Close() error {
}
var _ io.Closer = &closeAssertReader{}
+
+func TestGenerateSDR(t *testing.T) {
+ d := t.TempDir()
+
+ miner := abi.ActorID(123)
+
+ sp := &basicfs.Provider{
+ Root: d,
+ }
+ sb, err := New(sp)
+ require.NoError(t, err)
+
+ si := storiface.SectorRef{
+ ID: abi.SectorID{Miner: miner, Number: 1},
+ ProofType: sealProofType,
+ }
+
+ s := seal{ref: si}
+
+ sz := abi.PaddedPieceSize(sectorSize).Unpadded()
+
+ s.pi, err = sb.AddPiece(context.TODO(), si, []abi.UnpaddedPieceSize{}, sz, nullreader.NewNullReader(sz))
+ require.NoError(t, err)
+
+ s.ticket = sealRand
+
+ _, err = sb.SealPreCommit1(context.TODO(), si, s.ticket, []abi.PieceInfo{s.pi})
+ require.NoError(t, err)
+
+ // sdr for comparison
+
+ sdrCache := filepath.Join(d, "sdrcache")
+
+ commd, err := commcid.CIDToDataCommitmentV1(s.pi.PieceCID)
+ require.NoError(t, err)
+
+ replicaID, err := sealProofType.ReplicaId(si.ID.Miner, si.ID.Number, s.ticket, commd)
+ require.NoError(t, err)
+
+ err = ffi.GenerateSDR(sealProofType, sdrCache, replicaID)
+ require.NoError(t, err)
+
+ // list files in d recursively, for debug
+
+ require.NoError(t, filepath.Walk(d, func(path string, info fs.FileInfo, err error) error {
+ fmt.Println(path)
+ return nil
+ }))
+
+ // compare
+ lastLayerFile := "sc-02-data-layer-2.dat"
+
+ sdrFile := filepath.Join(sdrCache, lastLayerFile)
+ pc1File := filepath.Join(d, "cache/s-t0123-1/", lastLayerFile)
+
+ sdrData, err := os.ReadFile(sdrFile)
+ require.NoError(t, err)
+
+ pc1Data, err := os.ReadFile(pc1File)
+ require.NoError(t, err)
+
+ require.Equal(t, sdrData, pc1Data)
+}
diff --git a/storage/sealer/ffiwrapper/types.go b/storage/sealer/ffiwrapper/types.go
index d20d581db..1c039cd87 100644
--- a/storage/sealer/ffiwrapper/types.go
+++ b/storage/sealer/ffiwrapper/types.go
@@ -11,6 +11,8 @@ type SectorProvider interface {
// * returns storiface.ErrSectorNotFound if a requested existing sector doesn't exist
// * returns an error when allocate is set, and existing isn't, and the sector exists
AcquireSector(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error)
+ // AcquireSector, but makes a copy to preserve its long-term storage location.
+ AcquireSectorCopy(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error)
}
var _ SectorProvider = &basicfs.Provider{}
diff --git a/storage/sealer/manager.go b/storage/sealer/manager.go
index 3f496b7de..700a5aec5 100644
--- a/storage/sealer/manager.go
+++ b/storage/sealer/manager.go
@@ -330,7 +330,7 @@ func (m *Manager) SectorsUnsealPiece(ctx context.Context, sector storiface.Secto
// if the selected worker does NOT have the sealed files for the sector, instruct it to fetch it from a worker that has them and
// put it in the sealing scratch space.
- sealFetch := PrepareAction{
+ unsealFetch := PrepareAction{
Action: func(ctx context.Context, worker Worker) error {
log.Debugf("copy sealed/cache sector data for sector %d", sector.ID)
_, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy))
@@ -359,7 +359,7 @@ func (m *Manager) SectorsUnsealPiece(ctx context.Context, sector storiface.Secto
selector := newExistingSelector(m.index, sector.ID, storiface.FTSealed|storiface.FTCache, true)
log.Debugf("will schedule unseal for sector %d", sector.ID)
- err = m.sched.Schedule(ctx, sector, sealtasks.TTUnseal, selector, sealFetch, func(ctx context.Context, w Worker) error {
+ err = m.sched.Schedule(ctx, sector, sealtasks.TTUnseal, selector, unsealFetch, func(ctx context.Context, w Worker) error {
// TODO: make restartable
// NOTE: we're unsealing the whole sector here as with SDR we can't really
diff --git a/storage/sealer/manager_test.go b/storage/sealer/manager_test.go
index 8acd474a3..7c3e1a1f2 100644
--- a/storage/sealer/manager_test.go
+++ b/storage/sealer/manager_test.go
@@ -21,13 +21,16 @@ import (
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "golang.org/x/xerrors"
ffi "github.com/filecoin-project/filecoin-ffi"
+ "github.com/filecoin-project/go-paramfetch"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/proof"
"github.com/filecoin-project/go-statestore"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"
+ "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/storage/paths"
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
"github.com/filecoin-project/lotus/storage/sealer/fsutil"
@@ -198,6 +201,16 @@ func (m NullReader) NullBytes() int64 {
return m.N
}
+func TestMain(m *testing.M) {
+ err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), build.SrsJSON(), uint64(2048))
+ if err != nil {
+ panic(xerrors.Errorf("failed to acquire Groth parameters for 2KiB sectors: %w", err))
+ }
+
+ code := m.Run()
+ os.Exit(code)
+}
+
func TestSnapDeals(t *testing.T) {
logging.SetAllLoggers(logging.LevelWarn)
ctx := context.Background()
@@ -248,7 +261,7 @@ func TestSnapDeals(t *testing.T) {
// Precommit and Seal a CC sector
fmt.Printf("PC1\n")
- ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9}
+ ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}
pc1Out, err := m.SealPreCommit1(ctx, sid, ticket, ccPieces)
require.NoError(t, err)
fmt.Printf("PC2\n")
diff --git a/storage/sealer/proofpaths/cachefiles.go b/storage/sealer/proofpaths/cachefiles.go
new file mode 100644
index 000000000..5e41f831d
--- /dev/null
+++ b/storage/sealer/proofpaths/cachefiles.go
@@ -0,0 +1,30 @@
+package proofpaths
+
+import (
+ "fmt"
+
+ "github.com/filecoin-project/go-state-types/abi"
+)
+
+var dataFilePrefix = "sc-02-data-"
+
+func LayerFileName(layer int) string {
+ return fmt.Sprintf("%slayer-%d.dat", dataFilePrefix, layer)
+}
+
+func SDRLayers(spt abi.RegisteredSealProof) (int, error) {
+ switch spt {
+ case abi.RegisteredSealProof_StackedDrg2KiBV1, abi.RegisteredSealProof_StackedDrg2KiBV1_1:
+ return 2, nil
+ case abi.RegisteredSealProof_StackedDrg8MiBV1, abi.RegisteredSealProof_StackedDrg8MiBV1_1:
+ return 2, nil
+ case abi.RegisteredSealProof_StackedDrg512MiBV1, abi.RegisteredSealProof_StackedDrg512MiBV1_1:
+ return 2, nil
+ case abi.RegisteredSealProof_StackedDrg32GiBV1, abi.RegisteredSealProof_StackedDrg32GiBV1_1:
+ return 11, nil
+ case abi.RegisteredSealProof_StackedDrg64GiBV1, abi.RegisteredSealProof_StackedDrg64GiBV1_1:
+ return 11, nil
+ default:
+ return 0, fmt.Errorf("unsupported proof type: %v", spt)
+ }
+}
diff --git a/storage/sealer/proofpaths/cachefiles_test.go b/storage/sealer/proofpaths/cachefiles_test.go
new file mode 100644
index 000000000..b2c0639c8
--- /dev/null
+++ b/storage/sealer/proofpaths/cachefiles_test.go
@@ -0,0 +1,16 @@
+package proofpaths
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/filecoin-project/go-state-types/abi"
+)
+
+func TestSDRLayersDefined(t *testing.T) {
+ for proof := range abi.SealProofInfos {
+ _, err := SDRLayers(proof)
+ require.NoError(t, err)
+ }
+}
diff --git a/storage/sealer/roprov.go b/storage/sealer/roprov.go
index c225fda78..bc38efd7a 100644
--- a/storage/sealer/roprov.go
+++ b/storage/sealer/roprov.go
@@ -36,3 +36,7 @@ func (l *readonlyProvider) AcquireSector(ctx context.Context, id storiface.Secto
return p, cancel, err
}
+
+func (l *readonlyProvider) AcquireSectorCopy(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
+ return storiface.SectorPaths{}, nil, xerrors.New("read-only storage")
+}
diff --git a/storage/sealer/storiface/worker.go b/storage/sealer/storiface/worker.go
index 2badad292..e84fd8aa9 100644
--- a/storage/sealer/storiface/worker.go
+++ b/storage/sealer/storiface/worker.go
@@ -186,12 +186,20 @@ const (
ErrTempAllocateSpace
)
+type WorkError interface {
+ ErrCode() ErrorCode
+}
+
type CallError struct {
Code ErrorCode
Message string
sub error
}
+func (c *CallError) ErrCode() ErrorCode {
+ return c.Code
+}
+
func (c *CallError) Error() string {
return fmt.Sprintf("storage call error %d: %s", c.Code, c.Message)
}
@@ -204,6 +212,8 @@ func (c *CallError) Unwrap() error {
return errors.New(c.Message)
}
+var _ WorkError = &CallError{}
+
func Err(code ErrorCode, sub error) *CallError {
return &CallError{
Code: code,
diff --git a/storage/sealer/worker_local.go b/storage/sealer/worker_local.go
index 24b9ff247..cc4a81599 100644
--- a/storage/sealer/worker_local.go
+++ b/storage/sealer/worker_local.go
@@ -180,6 +180,10 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector stor
}, nil
}
+func (l *localWorkerPathProvider) AcquireSectorCopy(ctx context.Context, id storiface.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, ptype storiface.PathType) (storiface.SectorPaths, func(), error) {
+ return (&localWorkerPathProvider{w: l.w, op: storiface.AcquireCopy}).AcquireSector(ctx, id, existing, allocate, ptype)
+}
+
func (l *LocalWorker) ffiExec() (storiface.Storage, error) {
return ffiwrapper.New(&localWorkerPathProvider{w: l})
}
@@ -571,15 +575,16 @@ func (l *LocalWorker) UnsealPiece(ctx context.Context, sector storiface.SectorRe
return nil, xerrors.Errorf("unsealing sector: %w", err)
}
- if err = l.storage.RemoveCopies(ctx, sector.ID, storiface.FTSealed); err != nil {
- return nil, xerrors.Errorf("removing source data: %w", err)
+ // note: the unsealed file is moved to long-term storage in Manager.SectorsUnsealPiece
+
+ storageTypes := []storiface.SectorFileType{storiface.FTSealed, storiface.FTCache, storiface.FTUpdate, storiface.FTUpdateCache}
+ for _, fileType := range storageTypes {
+ if err = l.storage.RemoveCopies(ctx, sector.ID, fileType); err != nil {
+ return nil, xerrors.Errorf("removing source data: %w", err)
+ }
}
- if err = l.storage.RemoveCopies(ctx, sector.ID, storiface.FTCache); err != nil {
- return nil, xerrors.Errorf("removing source data: %w", err)
- }
-
- log.Debugf("worker has unsealed piece, sector=%+v", sector.ID)
+ log.Debugf("unsealed piece, sector=%+v", sector.ID)
return nil, nil
})
diff --git a/storage/wdpost/wdpost_run.go b/storage/wdpost/wdpost_run.go
index c2a448fb0..0168bc706 100644
--- a/storage/wdpost/wdpost_run.go
+++ b/storage/wdpost/wdpost_run.go
@@ -32,6 +32,8 @@ import (
)
// recordPoStFailure records a failure in the journal.
+//
+//nolint:unused
func (s *WindowPoStScheduler) recordPoStFailure(err error, ts *types.TipSet, deadline *dline.Info) {
s.journal.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
c := evtCommon{Error: err}
diff --git a/storage/wdpost/wdpost_run_faults.go b/storage/wdpost/wdpost_run_faults.go
index 2474ce77b..f36b30d35 100644
--- a/storage/wdpost/wdpost_run_faults.go
+++ b/storage/wdpost/wdpost_run_faults.go
@@ -195,106 +195,6 @@ func (s *WindowPoStScheduler) declareRecoveries(ctx context.Context, dlIdx uint6
return batchedRecoveryDecls, msgs, nil
}
-// declareFaults identifies the sectors on the specified proving deadline that
-// are faulty, and reports the faults on chain via the `DeclareFaults` message
-// to our miner actor.
-//
-// NOTE: THIS CODE ISN'T INVOKED AFTER THE IGNITION UPGRADE
-// This is always invoked ahead of time, before the deadline for the evaluated
-// sectors arrives. That way, faults are declared before a penalty is accrued.
-//
-// If a declaration is made, it awaits for build.MessageConfidence confirmations
-// on chain before returning.
-//
-// TODO: the waiting should happen in the background. Right now this
-//
-// is blocking/delaying the actual generation and submission of WindowPoSts in
-// this deadline!
-func (s *WindowPoStScheduler) declareFaults(ctx context.Context, dlIdx uint64, partitions []api.Partition, tsk types.TipSetKey) ([]miner.FaultDeclaration, *types.SignedMessage, error) {
- ctx, span := trace.StartSpan(ctx, "storage.declareFaults")
- defer span.End()
-
- bad := uint64(0)
- params := &miner.DeclareFaultsParams{
- Faults: []miner.FaultDeclaration{},
- }
-
- for partIdx, partition := range partitions {
- nonFaulty, err := bitfield.SubtractBitField(partition.LiveSectors, partition.FaultySectors)
- if err != nil {
- return nil, nil, xerrors.Errorf("determining non faulty sectors: %w", err)
- }
-
- good, err := s.checkSectors(ctx, nonFaulty, tsk)
- if err != nil {
- return nil, nil, xerrors.Errorf("checking sectors: %w", err)
- }
-
- newFaulty, err := bitfield.SubtractBitField(nonFaulty, good)
- if err != nil {
- return nil, nil, xerrors.Errorf("calculating faulty sector set: %w", err)
- }
-
- c, err := newFaulty.Count()
- if err != nil {
- return nil, nil, xerrors.Errorf("counting faulty sectors: %w", err)
- }
-
- if c == 0 {
- continue
- }
-
- bad += c
-
- params.Faults = append(params.Faults, miner.FaultDeclaration{
- Deadline: dlIdx,
- Partition: uint64(partIdx),
- Sectors: newFaulty,
- })
- }
-
- faults := params.Faults
- if len(faults) == 0 {
- return faults, nil, nil
- }
-
- log.Errorw("DETECTED FAULTY SECTORS, declaring faults", "count", bad)
-
- enc, aerr := actors.SerializeParams(params)
- if aerr != nil {
- return faults, nil, xerrors.Errorf("could not serialize declare faults parameters: %w", aerr)
- }
-
- msg := &types.Message{
- To: s.actor,
- Method: builtin.MethodsMiner.DeclareFaults,
- Params: enc,
- Value: types.NewInt(0), // TODO: Is there a fee?
- }
- spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)}
- if err := s.prepareMessage(ctx, msg, spec); err != nil {
- return faults, nil, err
- }
-
- sm, err := s.api.MpoolPushMessage(ctx, msg, spec)
- if err != nil {
- return faults, sm, xerrors.Errorf("pushing message to mpool: %w", err)
- }
-
- log.Warnw("declare faults Message CID", "cid", sm.Cid())
-
- rec, err := s.api.StateWaitMsg(context.TODO(), sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
- if err != nil {
- return faults, sm, xerrors.Errorf("declare faults wait error: %w", err)
- }
-
- if rec.Receipt.ExitCode != 0 {
- return faults, sm, xerrors.Errorf("declare faults wait non-0 exit code: %d", rec.Receipt.ExitCode)
- }
-
- return faults, sm, nil
-}
-
func (s *WindowPoStScheduler) asyncFaultRecover(di dline.Info, ts *types.TipSet) {
go func() {
// check faults / recoveries for the *next* deadline. It's already too
diff --git a/storage/wdpost/wdpost_sched.go b/storage/wdpost/wdpost_sched.go
index 29c39ad9e..059b1bf0c 100644
--- a/storage/wdpost/wdpost_sched.go
+++ b/storage/wdpost/wdpost_sched.go
@@ -225,6 +225,8 @@ func (s *WindowPoStScheduler) update(ctx context.Context, revert, apply *types.T
}
// onAbort is called when generating proofs or submitting proofs is aborted
+//
+//nolint:unused
func (s *WindowPoStScheduler) onAbort(ts *types.TipSet, deadline *dline.Info) {
s.journal.RecordEvent(s.evtTypes[evtTypeWdPoStScheduler], func() interface{} {
c := evtCommon{}