master-to-sturdy
parent c47992a866
commit a520ee85d6
@@ -6,9 +6,17 @@ orbs:
executors:
  golang:
    docker:
      # Must match GO_VERSION_MIN in project root. Change in gen.go
      - image: cimg/go:1.19.7
      # Must match GO_VERSION_MIN in project root
      - image: cimg/go:1.20.7
    resource_class: medium+
  golang-2xl:
    docker:
      # Must match GO_VERSION_MIN in project root
      - image: cimg/go:1.20.7
    resource_class: 2xlarge
  ubuntu:
    docker:
      - image: ubuntu:20.04

commands:
  build-platform-specific:
@@ -548,12 +556,6 @@ workflows:
      - build
    suite: itest-batch_deal
    target: "./itests/batch_deal_test.go"
- test:
    name: test-itest-ccupgrade
    requires:
      - build
    suite: itest-ccupgrade
    target: "./itests/ccupgrade_test.go"
- test:
    name: test-itest-cli
    requires:
@@ -891,12 +893,6 @@ workflows:
      - build
    suite: itest-remove_verifreg_datacap
    target: "./itests/remove_verifreg_datacap_test.go"
- test:
    name: test-itest-sdr_upgrade
    requires:
      - build
    suite: itest-sdr_upgrade
    target: "./itests/sdr_upgrade_test.go"
- test:
    name: test-itest-sealing_resources
    requires:
@@ -921,12 +917,6 @@ workflows:
      - build
    suite: itest-sector_import_simple
    target: "./itests/sector_import_simple_test.go"
- test:
    name: test-itest-sector_make_cc_avail
    requires:
      - build
    suite: itest-sector_make_cc_avail
    target: "./itests/sector_make_cc_avail_test.go"
- test:
    name: test-itest-sector_miner_collateral
    requires:
@@ -945,18 +935,6 @@ workflows:
      - build
    suite: itest-sector_pledge
    target: "./itests/sector_pledge_test.go"
- test:
    name: test-itest-sector_prefer_no_upgrade
    requires:
      - build
    suite: itest-sector_prefer_no_upgrade
    target: "./itests/sector_prefer_no_upgrade_test.go"
- test:
    name: test-itest-sector_revert_available
    requires:
      - build
    suite: itest-sector_revert_available
    target: "./itests/sector_revert_available_test.go"
- test:
    name: test-itest-sector_terminate
    requires:
@@ -981,12 +959,6 @@ workflows:
      - build
    suite: itest-splitstore
    target: "./itests/splitstore_test.go"
- test:
    name: test-itest-tape
    requires:
      - build
    suite: itest-tape
    target: "./itests/tape_test.go"
- test:
    name: test-itest-verifreg
    requires:
@@ -1046,6 +1018,7 @@ workflows:
    suite: utest-unit-cli
    target: "./cli/... ./cmd/... ./api/..."
    get-params: true
    executor: golang-2xl
- test:
    name: test-unit-node
    requires:
@@ -1053,13 +1026,15 @@ workflows:
    suite: utest-unit-node
    target: "./node/..."

- test:
    name: test-unit-rest
    requires:
      - build
    suite: utest-unit-rest
    target: "./api/... ./blockstore/... ./build/... ./chain/... ./cli/... ./cmd/... ./conformance/... ./extern/... ./gateway/... ./journal/... ./lib/... ./markets/... ./node/... ./paychmgr/... ./storage/... ./tools/..."
    resource_class: 2xlarge
    target: "./blockstore/... ./build/... ./chain/... ./conformance/... ./gateway/... ./journal/... ./lib/... ./markets/... ./paychmgr/... ./tools/..."
    executor: golang-2xl
- test:
    name: test-unit-storage
    requires:
@@ -1067,6 +1042,7 @@ workflows:
    suite: utest-unit-storage
    target: "./storage/... ./extern/..."

- test:
    go-test-flags: "-run=TestMulticoreSDR"
    requires:
@@ -67,6 +67,8 @@ func main() {
        if err != nil {
            panic(err)
        }
        // Redundantly flag both absolute and relative paths as excluded
        excluded[filepath.Join(repo, s)] = struct{}{}
        excluded[e] = struct{}{}
    }
}
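For context, a minimal sketch of the exclusion-set idea the added lines rely on; the function and variable names below are illustrative, not taken from gen.go:

```go
package main

import (
    "fmt"
    "path/filepath"
)

// buildExcludeSet records every excluded entry under both its absolute and its
// repo-relative form, so later lookups succeed regardless of which form a
// directory walk produces. Names here are illustrative only.
func buildExcludeSet(repo string, entries []string) map[string]struct{} {
    excluded := make(map[string]struct{}, 2*len(entries))
    for _, e := range entries {
        excluded[filepath.Join(repo, e)] = struct{}{} // absolute path
        excluded[e] = struct{}{}                      // relative path
    }
    return excluded
}

func main() {
    set := buildExcludeSet("/home/user/lotus", []string{"itests", "cli"})
    fmt.Println(len(set), "excluded path forms")
}
```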
@@ -6,9 +6,17 @@ orbs:
executors:
  golang:
    docker:
      # Must match GO_VERSION_MIN in project root. Change in gen.go
      - image: cimg/go:[[ .GoVersion]]
      # Must match GO_VERSION_MIN in project root
      - image: cimg/go:1.20.7
    resource_class: medium+
  golang-2xl:
    docker:
      # Must match GO_VERSION_MIN in project root
      - image: cimg/go:1.20.7
    resource_class: 2xlarge
  ubuntu:
    docker:
      - image: ubuntu:20.04

commands:
  build-platform-specific:
@@ -561,7 +569,8 @@ workflows:
    suite: utest-[[ $suite ]]
    target: "[[ $pkgs ]]"
    [[if eq $suite "unit-cli"]]get-params: true[[end]]
    [[- if eq $suite "unit-rest"]]resource_class: 2xlarge[[end]]
    [[if eq $suite "unit-cli"]]executor: golang-2xl[[end]]
    [[- if eq $suite "unit-rest"]]executor: golang-2xl[[end]]
    [[- end]]
- test:
    go-test-flags: "-run=TestMulticoreSDR"
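The `[[ ... ]]` constructs in this template are Go text/template actions using custom delimiters, which the generator (gen.go, per the comment above) expands into the final CircleCI config. A hypothetical, minimal sketch of that rendering step, with the data model reduced to a single assumed GoVersion field:

```go
package main

import (
    "os"
    "text/template"
)

func main() {
    // The template uses [[ ]] delimiters so it does not clash with
    // CircleCI's own {{ }} parameter syntax.
    tmpl := template.Must(template.New("config").
        Delims("[[", "]]").
        Parse("      - image: cimg/go:[[ .GoVersion ]]\n"))

    // GoVersion would normally be read from GO_VERSION_MIN; hard-coded here.
    data := struct{ GoVersion string }{GoVersion: "1.20.7"}
    if err := tmpl.Execute(os.Stdout, data); err != nil {
        panic(err)
    }
}
```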
31 .github/ISSUE_TEMPLATE/task.md vendored Normal file
@@ -0,0 +1,31 @@
---
name: New Task
about: A larger yet well-scoped task
title: '<title>'
labels: Needs Triage
assignees: ''

---

## User Story
<!-- Why? -->

## Acceptance Criteria
<!-- What? -->
<!-- add description -->

```[tasklist]
### Deliverables

```

## Technical Breakdown
```[tasklist]
### Development

```

```[tasklist]
### Testing

```
1 .github/pull_request_template.md vendored
@@ -16,6 +16,7 @@ Before you mark the PR ready for review, please make sure that:
  - example: ` fix: mempool: Introduce a cache for valid signatures`
  - `PR type`: fix, feat, build, chore, ci, docs, perf, refactor, revert, style, test
  - `area`, e.g. api, chain, state, market, mempool, multisig, networking, paych, proving, sealing, wallet, deps
- [ ] If the PR affects users (e.g., new feature, bug fix, system requirements change), update the CHANGELOG.md and add details to the UNRELEASED section.
- [ ] New features have usage guidelines and / or documentation updates in
  - [ ] [Lotus Documentation](https://lotus.filecoin.io)
  - [ ] [Discussion Tutorials](https://github.com/filecoin-project/lotus/discussions/categories/tutorials)
@@ -12,9 +12,9 @@ linters:
  - unconvert
  - staticcheck
  - varcheck
  - structcheck
  - deadcode
  - scopelint
  - unused

# We don't want to skip builtin/
skip-dirs-use-default: false
106 CHANGELOG.md
@@ -3,9 +3,111 @@
# UNRELEASED

## New features
- feat: Added new environment variable `LOTUS_EXEC_TRACE_CACHE_SIZE` to configure execution trace cache size ([filecoin-project/lotus#10585](https://github.com/filecoin-project/lotus/pull/10585))
  - If unset, we default to caching the 16 most recent execution traces. Node operators may want to set this to 0, while exchanges may want to crank it up (see the sketch below).
- feat: Added new tracing API (**HIGHLY EXPERIMENTAL**) supporting two RPC methods: `trace_block` and `trace_replayBlockTransactions` ([filecoin-project/lotus#11100](https://github.com/filecoin-project/lotus/pull/11100))
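For illustration only, a hedged sketch of how a consumer might resolve this setting; the variable name comes from the entry above, but the parsing and defaulting shown here are assumptions rather than Lotus's actual code:

```go
package main

import (
    "fmt"
    "os"
    "strconv"
)

// execTraceCacheSize returns the configured cache size, defaulting to the
// 16 most recent execution traces when LOTUS_EXEC_TRACE_CACHE_SIZE is unset.
// A value of 0 disables the cache entirely.
func execTraceCacheSize() int {
    v, ok := os.LookupEnv("LOTUS_EXEC_TRACE_CACHE_SIZE")
    if !ok {
        return 16
    }
    n, err := strconv.Atoi(v)
    if err != nil || n < 0 {
        return 16 // fall back to the default on bad input
    }
    return n
}

func main() {
    fmt.Println("execution trace cache size:", execTraceCacheSize())
}
```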
# v1.23.3 / 2023-08-01

This feature release of Lotus includes numerous improvements and enhancements for node operators, ETH RPC providers and storage providers.

This feature release requires a **minimum Go version of v1.19.12 or higher to successfully build Lotus**. Go version 1.20 is also supported, but 1.21 is NOT.

## Highlights

- [Lotus now includes a Slasher tool](https://github.com/filecoin-project/lotus/pull/10928) to monitor the network for Consensus Faults and report them as appropriate
  - The Slasher investigates all incoming blocks and assesses whether they trigger any of the three Consensus Faults defined in the Filecoin protocol
  - If any faults are detected, the Slasher sends a `ReportConsensusFault` message to the faulty miner
  - For more information on the Slasher, including how to run it, please find the documentation [here](https://lotus.filecoin.io/lotus/manage/slasher-and-disputer/)
- The Ethereum-like RPC exposed by Lotus is now compatible with EIP-1898: https://github.com/filecoin-project/lotus/pull/10815
- The lotus-miner PieceReader now supports parallel reads: https://github.com/filecoin-project/lotus/pull/10913
- Added new environment variable `LOTUS_EXEC_TRACE_CACHE_SIZE` to configure execution trace cache size ([filecoin-project/lotus#10585](https://github.com/filecoin-project/lotus/pull/10585))
  - If unset, we default to caching the 16 most recent execution traces. Storage Providers may want to set this to 0, while exchanges may want to crank it up.
## New features
- feat: miner cli: sectors list upgrade-bounds tool ([filecoin-project/lotus#10923](https://github.com/filecoin-project/lotus/pull/10923))
- Add new RPC stress testing tool (lotus-bench rpc) with rich reporting ([filecoin-project/lotus#10761](https://github.com/filecoin-project/lotus/pull/10761))
- feat: alert: Add FVM_CONCURRENCY alert ([filecoin-project/lotus#10933](https://github.com/filecoin-project/lotus/pull/10933))
- feat: Add eth_syncing RPC method ([filecoin-project/lotus#10719](https://github.com/filecoin-project/lotus/pull/10719))
- feat: sealing: flag to run data_cid untied from addpiece ([filecoin-project/lotus#10797](https://github.com/filecoin-project/lotus/pull/10797))
- feat: Lotus Gateway: add MpoolPending, ChainGetBlock and MinerGetBaseInfo ([filecoin-project/lotus#10929](https://github.com/filecoin-project/lotus/pull/10929))
## Improvements
- chore: update ffi & fvm ([filecoin-project/lotus#11040](https://github.com/filecoin-project/lotus/pull/11040))
- feat: Make sure we don't store duplicate actor events caused by reorgs in events.db ([filecoin-project/lotus#11015](https://github.com/filecoin-project/lotus/pull/11015))
- sealing: Use only non-assigned deals when selecting snap sectors ([filecoin-project/lotus#11002](https://github.com/filecoin-project/lotus/pull/11002))
- chore: not display privatekey ([filecoin-project/lotus#11006](https://github.com/filecoin-project/lotus/pull/11006))
- chore: shed: update actor version ([filecoin-project/lotus#11020](https://github.com/filecoin-project/lotus/pull/11020))
- chore: migrate to boxo ([filecoin-project/lotus#10921](https://github.com/filecoin-project/lotus/pull/10921))
- feat: deflake TestDealsWithFinalizeEarly ([filecoin-project/lotus#10978](https://github.com/filecoin-project/lotus/pull/10978))
- fix: pubsub: do not treat ErrExistingNonce as Reject ([filecoin-project/lotus#10973](https://github.com/filecoin-project/lotus/pull/10973))
- feat: deflake TestDMLevelPartialRetrieval (#10972) ([filecoin-project/lotus#10972](https://github.com/filecoin-project/lotus/pull/10972))
- fix: eth: ensure that the event topics are non-nil ([filecoin-project/lotus#10971](https://github.com/filecoin-project/lotus/pull/10971))
- Add comment stating msgIndex is an experimental feature ([filecoin-project/lotus#10968](https://github.com/filecoin-project/lotus/pull/10968))
- feat: cli(compute-state) default to the tipset at the given epoch ([filecoin-project/lotus#10965](https://github.com/filecoin-project/lotus/pull/10965))
- Upgrade urfave dependency which now supports DisableSliceFlagSeparato… ([filecoin-project/lotus#10950](https://github.com/filecoin-project/lotus/pull/10950))
- Add new lotus-shed command for computing eth hash for a given message cid (#10961) ([filecoin-project/lotus#10961](https://github.com/filecoin-project/lotus/pull/10961))
- Prefill GetTipsetByHeight skiplist cache on lotus startup ([filecoin-project/lotus#10955](https://github.com/filecoin-project/lotus/pull/10955))
- Add lotus-shed command for backfilling txhash.db ([filecoin-project/lotus#10932](https://github.com/filecoin-project/lotus/pull/10932))
- chore: deps: update to go-libp2p 0.27.5 ([filecoin-project/lotus#10948](https://github.com/filecoin-project/lotus/pull/10948))
- Small improvement to make gen output ([filecoin-project/lotus#10951](https://github.com/filecoin-project/lotus/pull/10951))
- fix: improve perf of msgindex backfill ([filecoin-project/lotus#10941](https://github.com/filecoin-project/lotus/pull/10941))
- deps: update libp2p ([filecoin-project/lotus#10936](https://github.com/filecoin-project/lotus/pull/10936))
- sealing: Improve upgrade sector selection ([filecoin-project/lotus#10915](https://github.com/filecoin-project/lotus/pull/10915))
- Add timing test for mpool select with a large mpool dump ([filecoin-project/lotus#10650](https://github.com/filecoin-project/lotus/pull/10650))
- feat: slashfilter: drop outdated near-upgrade check ([filecoin-project/lotus#10925](https://github.com/filecoin-project/lotus/pull/10925))
- opt: MinerInfo adds the PendingOwnerAddress field ([filecoin-project/lotus#10927](https://github.com/filecoin-project/lotus/pull/10927))
- feat: itest: force PoSt more aggressively around deadline closure ([filecoin-project/lotus#10926](https://github.com/filecoin-project/lotus/pull/10926))
- test: messagepool: gas rewards are negative if GasFeeCap too low ([filecoin-project/lotus#10649](https://github.com/filecoin-project/lotus/pull/10649))
- fix: types: error out on decoding BlockMsg with extraneous data ([filecoin-project/lotus#10863](https://github.com/filecoin-project/lotus/pull/10863))
- update interop upgrade schedule ([filecoin-project/lotus#10879](https://github.com/filecoin-project/lotus/pull/10879))
- itests: Test PoSt V1_1 on workers ([filecoin-project/lotus#10732](https://github.com/filecoin-project/lotus/pull/10732))
- Update gas_balancing.md ([filecoin-project/lotus#10924](https://github.com/filecoin-project/lotus/pull/10924))
- feat: cli: Make compact partitions cmd better ([filecoin-project/lotus#9070](https://github.com/filecoin-project/lotus/pull/9070))
- fix: include extra messages in ComputeState InvocResult output ([filecoin-project/lotus#10628](https://github.com/filecoin-project/lotus/pull/10628))
- feat: pubsub: treat ErrGasFeeCapTooLow as ignore, not reject ([filecoin-project/lotus#10652](https://github.com/filecoin-project/lotus/pull/10652))
- feat: run lotus-shed commands in context that is cancelled on sigterm ([filecoin-project/lotus#10877](https://github.com/filecoin-project/lotus/pull/10877))
- fix:lotus-fountain:set default data-cap same as MinVerifiedDealSize ([filecoin-project/lotus#10920](https://github.com/filecoin-project/lotus/pull/10920))
- pass the right g-recaptcha data
- fix: not call RUnlock ([filecoin-project/lotus#10912](https://github.com/filecoin-project/lotus/pull/10912))
- opt: cli: If present, print Events Root ([filecoin-project/lotus#10893](https://github.com/filecoin-project/lotus/pull/10893))
- Calibration faucet UI improvements ([filecoin-project/lotus#10905](https://github.com/filecoin-project/lotus/pull/10905))
- chore: chain: replace storetheindex with go-libipni ([filecoin-project/lotus#10841](https://github.com/filecoin-project/lotus/pull/10841))
- Add alerts to `Lotus info` cmd ([filecoin-project/lotus#10894](https://github.com/filecoin-project/lotus/pull/10894))
- fix: cli: make redeclare cmd work properly ([filecoin-project/lotus#10860](https://github.com/filecoin-project/lotus/pull/10860))
- fix: shed remove datacap not working with ledger ([filecoin-project/lotus#10880](https://github.com/filecoin-project/lotus/pull/10880))
- Check if epoch is negative in GetTipsetByHeight ([filecoin-project/lotus#10878](https://github.com/filecoin-project/lotus/pull/10878))
- chore: update go-fil-markets ([filecoin-project/lotus#10867](https://github.com/filecoin-project/lotus/pull/10867))
- feat: alerts: Add lotus-miner legacy-markets alert ([filecoin-project/lotus#10868](https://github.com/filecoin-project/lotus/pull/10868))
- feat:fountain:add grant-datacap support ([filecoin-project/lotus#10856](https://github.com/filecoin-project/lotus/pull/10856))
- feat: itests: add logs to blockminer.go failure case ([filecoin-project/lotus#10861](https://github.com/filecoin-project/lotus/pull/10861))
- feat: eth: Add support for blockHash param in eth_getLogs ([filecoin-project/lotus#10782](https://github.com/filecoin-project/lotus/pull/10782))
- lotus-fountain: make compatible with 0x addresses #10560 ([filecoin-project/lotus#10784](https://github.com/filecoin-project/lotus/pull/10784))
- feat: deflake sector_import_simple ([filecoin-project/lotus#10858](https://github.com/filecoin-project/lotus/pull/10858))
- fix: splitstore: remove deadlock around waiting for sync ([filecoin-project/lotus#10857](https://github.com/filecoin-project/lotus/pull/10857))
- fix: sched: Address GET_32G_MAX_CONCURRENT regression (#10850) ([filecoin-project/lotus#10850](https://github.com/filecoin-project/lotus/pull/10850))
- feat: fix deadlock in splitstore-mpool interaction ([filecoin-project/lotus#10840](https://github.com/filecoin-project/lotus/pull/10840))
- chore: update go-libp2p to v0.27.3 ([filecoin-project/lotus#10671](https://github.com/filecoin-project/lotus/pull/10671))
- libp2p: add QUIC and WebTransport to default listen addresses ([filecoin-project/lotus#10848](https://github.com/filecoin-project/lotus/pull/10848))
- fix: ci: Debugging m1 build ([filecoin-project/lotus#10749](https://github.com/filecoin-project/lotus/pull/10749))
- Validate that FromBlock/ToBlock epoch is indeed a hex value (#10780) ([filecoin-project/lotus#10780](https://github.com/filecoin-project/lotus/pull/10780))
- fix: remove invalid field UpgradePriceListOopsHeight ([filecoin-project/lotus#10772](https://github.com/filecoin-project/lotus/pull/10772))
- feat: deflake eth_balance_test ([filecoin-project/lotus#10847](https://github.com/filecoin-project/lotus/pull/10847))
- fix: tests: Use mutex-wrapped datastore in storage tests ([filecoin-project/lotus#10846](https://github.com/filecoin-project/lotus/pull/10846))
- Make lotus-fountain UI slightly friendlier ([filecoin-project/lotus#10785](https://github.com/filecoin-project/lotus/pull/10785))
- Make (un)subscribe and filter RPC methods require only read perm ([filecoin-project/lotus#10825](https://github.com/filecoin-project/lotus/pull/10825))
- deps: Update go-jsonrpc to v0.3.1 ([filecoin-project/lotus#10845](https://github.com/filecoin-project/lotus/pull/10845))
- feat: deflake paych_api_test ([filecoin-project/lotus#10843](https://github.com/filecoin-project/lotus/pull/10843))
- fix: Eth RPC: do not occlude block param errors. ([filecoin-project/lotus#10534](https://github.com/filecoin-project/lotus/pull/10534))
- feat: cli: More ux-friendly batching cmds ([filecoin-project/lotus#10837](https://github.com/filecoin-project/lotus/pull/10837))
- fix: cli: Hide legacy markets cmds ([filecoin-project/lotus#10842](https://github.com/filecoin-project/lotus/pull/10842))
- feat: chainstore: exit early in MaybeTakeHeavierTipset ([filecoin-project/lotus#10839](https://github.com/filecoin-project/lotus/pull/10839))
- fix: itest: fix eth deploy test flake ([filecoin-project/lotus#10829](https://github.com/filecoin-project/lotus/pull/10829))
- style: mempool: chain errors using xerrors.Errorf ([filecoin-project/lotus#10836](https://github.com/filecoin-project/lotus/pull/10836))
- feat: deflake msgindex_test.go ([filecoin-project/lotus#10826](https://github.com/filecoin-project/lotus/pull/10826))
- feat: deflake TestEthFeeHistory ([filecoin-project/lotus#10816](https://github.com/filecoin-project/lotus/pull/10816))
- feat: make RunClientTest louder when deals fail ([filecoin-project/lotus#10817](https://github.com/filecoin-project/lotus/pull/10817))
- fix: cli: Change arg wording in change-beneficiary cmd ([filecoin-project/lotus#10823](https://github.com/filecoin-project/lotus/pull/10823))
- refactor: streamline error handling in CheckPendingMessages (#10818) ([filecoin-project/lotus#10818](https://github.com/filecoin-project/lotus/pull/10818))
- feat: Add tmp indices to events table while performing migration to V2

# v1.23.2 / 2023-06-28

@@ -1,5 +1,5 @@
#####################################
FROM golang:1.19.7-buster AS lotus-builder
FROM golang:1.20.7-bullseye AS lotus-builder
MAINTAINER Lotus Development Team

RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
@@ -58,7 +58,7 @@ COPY --from=lotus-builder /lib/*/libgcc_s.so.1 /lib/
COPY --from=lotus-builder /lib/*/libutil.so.1 /lib/
COPY --from=lotus-builder /usr/lib/*/libltdl.so.7 /lib/
COPY --from=lotus-builder /usr/lib/*/libnuma.so.1 /lib/
COPY --from=lotus-builder /usr/lib/*/libhwloc.so.5 /lib/
COPY --from=lotus-builder /usr/lib/*/libhwloc.so.* /lib/
COPY --from=lotus-builder /usr/lib/*/libOpenCL.so.1 /lib/

RUN useradd -r -u 532 -U fc \
@@ -1 +1 @@
1.19.7
1.20.7
2 Makefile
@@ -203,7 +203,7 @@ lotus-health:
.PHONY: lotus-health
BINS+=lotus-health

lotus-wallet:
lotus-wallet: $(BUILD_DEPS)
	rm -f lotus-wallet
	$(GOCC) build $(GOFLAGS) -o lotus-wallet ./cmd/lotus-wallet
.PHONY: lotus-wallet
@@ -71,10 +71,10 @@ For other distributions you can find the required dependencies [here.](https://l

#### Go

To build Lotus, you need a working installation of [Go 1.19.7 or higher](https://golang.org/dl/):
To build Lotus, you need a working installation of [Go 1.19.12 or higher](https://golang.org/dl/):

```bash
wget -c https://golang.org/dl/go1.19.7.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
wget -c https://golang.org/dl/go1.19.12.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
```

**TIP:**
@@ -641,6 +641,11 @@ type FullNode interface {
    // StateGetRandomnessFromBeacon is used to sample the beacon for randomness.
    StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) //perm:read

    // StateGetRandomnessDigestFromTickets is used to sample the chain for randomness.
    StateGetRandomnessDigestFromTickets(ctx context.Context, randEpoch abi.ChainEpoch, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
    // StateGetRandomnessDigestFromBeacon is used to sample the beacon for randomness.
    StateGetRandomnessDigestFromBeacon(ctx context.Context, randEpoch abi.ChainEpoch, tsk types.TipSetKey) (abi.Randomness, error) //perm:read

    // StateGetBeaconEntry returns the beacon entry for the given filecoin epoch. If
    // the entry has not yet been produced, the call will block until the entry
    // becomes available
@@ -863,6 +868,13 @@ type FullNode interface {
    // Returns the client version
    Web3ClientVersion(ctx context.Context) (string, error) //perm:read

    // TraceAPI related methods
    //
    // Returns traces created at given block
    EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) //perm:read
    // Replays all transactions in a block returning the requested traces for each transaction
    EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) //perm:read

    // CreateBackup creates node backup under the specified file name. The
    // method requires that the lotus daemon is running with the
    // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
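To make the new FullNode surface concrete, a hedged usage sketch; how the client handle is obtained and the block-selector string format are assumptions, and only the method signatures come from the diff above:

```go
package example

import (
    "context"
    "fmt"

    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/types"
)

// dumpTraces exercises the randomness-digest and trace methods added in this
// diff. fn is assumed to be an already-connected api.FullNode client; dialing
// it is out of scope for this sketch.
func dumpTraces(ctx context.Context, fn api.FullNode, epoch abi.ChainEpoch) error {
    // Sample the beacon digest at the given epoch on the heaviest chain.
    digest, err := fn.StateGetRandomnessDigestFromBeacon(ctx, epoch, types.EmptyTSK)
    if err != nil {
        return err
    }
    fmt.Printf("beacon digest @%d: %x\n", epoch, digest)

    // Fetch execution traces for a block; the "latest" selector is an
    // assumption of this sketch, not documented by the diff.
    traces, err := fn.EthTraceBlock(ctx, "latest")
    if err != nil {
        return err
    }
    fmt.Println("trace entries:", len(traces))
    return nil
}
```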
@@ -10,6 +10,7 @@ import (
    "github.com/filecoin-project/go-jsonrpc"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/builtin/v9/miner"
    verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
    "github.com/filecoin-project/go-state-types/dline"

    apitypes "github.com/filecoin-project/lotus/api/types"
@@ -65,6 +66,11 @@ type Gateway interface {
    StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
    StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
    StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
    StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
    StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
    StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error)
    StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error)
    StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error)
    StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error)
    StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
    StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
@@ -121,4 +127,6 @@ type Gateway interface {
    EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error)
    EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error)
    Web3ClientVersion(ctx context.Context) (string, error)
    EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error)
    EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error)
}
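A hedged sketch of a caller using the verified-registry lookups newly exposed on the Gateway; obtaining the connected `api.Gateway` client and the printed fields are assumptions of this example:

```go
package example

import (
    "context"
    "fmt"

    "github.com/filecoin-project/go-address"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/types"
)

// printClaims lists the verified-registry claims recorded for a provider,
// using the Gateway methods added in this diff. gw is assumed to be an
// already-connected api.Gateway client.
func printClaims(ctx context.Context, gw api.Gateway, provider address.Address) error {
    claims, err := gw.StateGetClaims(ctx, provider, types.EmptyTSK)
    if err != nil {
        return err
    }
    for id, c := range claims {
        // Client and Size are fields of the verifreg Claim type.
        fmt.Printf("claim %d: client %d, size %d\n", id, c.Client, c.Size)
    }
    return nil
}
```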
@@ -40,6 +40,9 @@ func CreateEthRPCAliases(as apitypes.Aliaser) {
    as.AliasMethod("eth_subscribe", "Filecoin.EthSubscribe")
    as.AliasMethod("eth_unsubscribe", "Filecoin.EthUnsubscribe")

    as.AliasMethod("trace_block", "Filecoin.EthTraceBlock")
    as.AliasMethod("trace_replayBlockTransactions", "Filecoin.EthTraceReplayBlockTransactions")

    as.AliasMethod("net_version", "Filecoin.NetVersion")
    as.AliasMethod("net_listening", "Filecoin.NetListening")
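Because `trace_block` is only an alias for `Filecoin.EthTraceBlock`, a plain JSON-RPC client can call either name. A minimal sketch under assumptions about the endpoint, port, and parameter format, none of which are specified by this diff:

```go
package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    // Standard JSON-RPC 2.0 envelope; "trace_block" resolves to
    // Filecoin.EthTraceBlock via the alias registered above.
    body, _ := json.Marshal(map[string]interface{}{
        "jsonrpc": "2.0",
        "id":      1,
        "method":  "trace_block",
        "params":  []interface{}{"latest"}, // block selector format is an assumption
    })

    // Assumes a local node listening on the default API port with no auth
    // required for this read-only call.
    resp, err := http.Post("http://127.0.0.1:1234/rpc/v1", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    var out map[string]interface{}
    if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
        panic(err)
    }
    fmt.Println(out["result"])
}
```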
@@ -1491,6 +1491,36 @@ func (mr *MockFullNodeMockRecorder) EthSyncing(arg0 interface{}) *gomock.Call {
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthSyncing", reflect.TypeOf((*MockFullNode)(nil).EthSyncing), arg0)
}

// EthTraceBlock mocks base method.
func (m *MockFullNode) EthTraceBlock(arg0 context.Context, arg1 string) ([]*ethtypes.EthTraceBlock, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "EthTraceBlock", arg0, arg1)
    ret0, _ := ret[0].([]*ethtypes.EthTraceBlock)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// EthTraceBlock indicates an expected call of EthTraceBlock.
func (mr *MockFullNodeMockRecorder) EthTraceBlock(arg0, arg1 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthTraceBlock", reflect.TypeOf((*MockFullNode)(nil).EthTraceBlock), arg0, arg1)
}

// EthTraceReplayBlockTransactions mocks base method.
func (m *MockFullNode) EthTraceReplayBlockTransactions(arg0 context.Context, arg1 string, arg2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "EthTraceReplayBlockTransactions", arg0, arg1, arg2)
    ret0, _ := ret[0].([]*ethtypes.EthTraceReplayBlockTransaction)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// EthTraceReplayBlockTransactions indicates an expected call of EthTraceReplayBlockTransactions.
func (mr *MockFullNodeMockRecorder) EthTraceReplayBlockTransactions(arg0, arg1, arg2 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthTraceReplayBlockTransactions", reflect.TypeOf((*MockFullNode)(nil).EthTraceReplayBlockTransactions), arg0, arg1, arg2)
}

// EthUninstallFilter mocks base method.
func (m *MockFullNode) EthUninstallFilter(arg0 context.Context, arg1 ethtypes.EthFilterID) (bool, error) {
    m.ctrl.T.Helper()
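These generated mocks are driven with gomock in the usual way. A small, hypothetical test sketch; the import paths follow the Lotus repository layout, and the expectation shown is illustrative only:

```go
package example

import (
    "context"
    "testing"

    "github.com/golang/mock/gomock"

    mocks "github.com/filecoin-project/lotus/api/mocks"
)

func TestEthTraceBlockMock(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    // Expect one EthTraceBlock call for "latest" and return an empty trace list.
    full := mocks.NewMockFullNode(ctrl)
    full.EXPECT().EthTraceBlock(gomock.Any(), "latest").Return(nil, nil)

    if _, err := full.EthTraceBlock(context.Background(), "latest"); err != nil {
        t.Fatal(err)
    }
}
```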
@ -3263,6 +3293,36 @@ func (mr *MockFullNodeMockRecorder) StateGetNetworkParams(arg0 interface{}) *gom
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetNetworkParams", reflect.TypeOf((*MockFullNode)(nil).StateGetNetworkParams), arg0)
|
||||
}
|
||||
|
||||
// StateGetRandomnessDigestFromBeacon mocks base method.
|
||||
func (m *MockFullNode) StateGetRandomnessDigestFromBeacon(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) (abi.Randomness, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetRandomnessDigestFromBeacon", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(abi.Randomness)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetRandomnessDigestFromBeacon indicates an expected call of StateGetRandomnessDigestFromBeacon.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetRandomnessDigestFromBeacon(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetRandomnessDigestFromBeacon", reflect.TypeOf((*MockFullNode)(nil).StateGetRandomnessDigestFromBeacon), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// StateGetRandomnessDigestFromTickets mocks base method.
|
||||
func (m *MockFullNode) StateGetRandomnessDigestFromTickets(arg0 context.Context, arg1 abi.ChainEpoch, arg2 types.TipSetKey) (abi.Randomness, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetRandomnessDigestFromTickets", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(abi.Randomness)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetRandomnessDigestFromTickets indicates an expected call of StateGetRandomnessDigestFromTickets.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetRandomnessDigestFromTickets(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetRandomnessDigestFromTickets", reflect.TypeOf((*MockFullNode)(nil).StateGetRandomnessDigestFromTickets), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// StateGetRandomnessFromBeacon mocks base method.
|
||||
func (m *MockFullNode) StateGetRandomnessFromBeacon(arg0 context.Context, arg1 crypto.DomainSeparationTag, arg2 abi.ChainEpoch, arg3 []byte, arg4 types.TipSetKey) (abi.Randomness, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
143 api/proxy_gen.go
@ -316,6 +316,10 @@ type FullNodeMethods struct {
|
||||
|
||||
EthSyncing func(p0 context.Context) (ethtypes.EthSyncingResult, error) `perm:"read"`
|
||||
|
||||
EthTraceBlock func(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) `perm:"read"`
|
||||
|
||||
EthTraceReplayBlockTransactions func(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) `perm:"read"`
|
||||
|
||||
EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) `perm:"read"`
|
||||
|
||||
EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) `perm:"read"`
|
||||
@ -494,6 +498,10 @@ type FullNodeMethods struct {
|
||||
|
||||
StateGetNetworkParams func(p0 context.Context) (*NetworkParams, error) `perm:"read"`
|
||||
|
||||
StateGetRandomnessDigestFromBeacon func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
|
||||
|
||||
StateGetRandomnessDigestFromTickets func(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
|
||||
|
||||
StateGetRandomnessFromBeacon func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
|
||||
|
||||
StateGetRandomnessFromTickets func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
|
||||
@ -728,6 +736,10 @@ type GatewayMethods struct {
|
||||
|
||||
EthSyncing func(p0 context.Context) (ethtypes.EthSyncingResult, error) ``
|
||||
|
||||
EthTraceBlock func(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) ``
|
||||
|
||||
EthTraceReplayBlockTransactions func(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) ``
|
||||
|
||||
EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) ``
|
||||
|
||||
EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) ``
|
||||
@ -766,6 +778,16 @@ type GatewayMethods struct {
|
||||
|
||||
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) ``
|
||||
|
||||
StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) ``
|
||||
|
||||
StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) ``
|
||||
|
||||
StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) ``
|
||||
|
||||
StateGetClaim func(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) ``
|
||||
|
||||
StateGetClaims func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) ``
|
||||
|
||||
StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) ``
|
||||
|
||||
StateLookupID func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``
|
||||
@ -2443,6 +2465,28 @@ func (s *FullNodeStub) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult
|
||||
return *new(ethtypes.EthSyncingResult), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
|
||||
if s.Internal.EthTraceBlock == nil {
|
||||
return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
|
||||
}
|
||||
return s.Internal.EthTraceBlock(p0, p1)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
|
||||
return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
|
||||
if s.Internal.EthTraceReplayBlockTransactions == nil {
|
||||
return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
|
||||
}
|
||||
return s.Internal.EthTraceReplayBlockTransactions(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
|
||||
return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) {
|
||||
if s.Internal.EthUninstallFilter == nil {
|
||||
return false, ErrNotSupported
|
||||
@ -3422,6 +3466,28 @@ func (s *FullNodeStub) StateGetNetworkParams(p0 context.Context) (*NetworkParams
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetRandomnessDigestFromBeacon(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (abi.Randomness, error) {
|
||||
if s.Internal.StateGetRandomnessDigestFromBeacon == nil {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetRandomnessDigestFromBeacon(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetRandomnessDigestFromBeacon(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (abi.Randomness, error) {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetRandomnessDigestFromTickets(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (abi.Randomness, error) {
|
||||
if s.Internal.StateGetRandomnessDigestFromTickets == nil {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetRandomnessDigestFromTickets(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetRandomnessDigestFromTickets(p0 context.Context, p1 abi.ChainEpoch, p2 types.TipSetKey) (abi.Randomness, error) {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetRandomnessFromBeacon(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) {
|
||||
if s.Internal.StateGetRandomnessFromBeacon == nil {
|
||||
return *new(abi.Randomness), ErrNotSupported
|
||||
@ -4643,6 +4709,28 @@ func (s *GatewayStub) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult,
|
||||
return *new(ethtypes.EthSyncingResult), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
|
||||
if s.Internal.EthTraceBlock == nil {
|
||||
return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
|
||||
}
|
||||
return s.Internal.EthTraceBlock(p0, p1)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) EthTraceBlock(p0 context.Context, p1 string) ([]*ethtypes.EthTraceBlock, error) {
|
||||
return *new([]*ethtypes.EthTraceBlock), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
|
||||
if s.Internal.EthTraceReplayBlockTransactions == nil {
|
||||
return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
|
||||
}
|
||||
return s.Internal.EthTraceReplayBlockTransactions(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) EthTraceReplayBlockTransactions(p0 context.Context, p1 string, p2 []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) {
|
||||
return *new([]*ethtypes.EthTraceReplayBlockTransaction), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) {
|
||||
if s.Internal.EthUninstallFilter == nil {
|
||||
return false, ErrNotSupported
|
||||
@ -4852,6 +4940,61 @@ func (s *GatewayStub) StateGetActor(p0 context.Context, p1 address.Address, p2 t
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
if s.Internal.StateGetAllocation == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetAllocation(p0, p1, p2, p3)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
if s.Internal.StateGetAllocationForPendingDeal == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetAllocationForPendingDeal(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
|
||||
if s.Internal.StateGetAllocations == nil {
|
||||
return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetAllocations(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
|
||||
return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
|
||||
if s.Internal.StateGetClaim == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetClaim(p0, p1, p2, p3)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
|
||||
if s.Internal.StateGetClaims == nil {
|
||||
return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetClaims(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
|
||||
return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateListMiners(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) {
|
||||
if s.Internal.StateListMiners == nil {
|
||||
return *new([]address.Address), ErrNotSupported
|
||||
|
@@ -312,6 +312,7 @@ type NetworkParams struct {
    SupportedProofTypes     []abi.RegisteredSealProof
    PreCommitChallengeDelay abi.ChainEpoch
    ForkUpgradeParams       ForkUpgradeParams
    Eip155ChainID           int
}

type ForkUpgradeParams struct {
@@ -339,6 +340,7 @@ type ForkUpgradeParams struct {
    UpgradeHyggeHeight      abi.ChainEpoch
    UpgradeLightningHeight  abi.ChainEpoch
    UpgradeThunderHeight    abi.ChainEpoch
    UpgradeWatermelonHeight abi.ChainEpoch
}

type NonceMapType map[address.Address]uint64
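A hedged sketch of how a client might read the new `Eip155ChainID` field; `StateGetNetworkParams` is taken from the generated bindings elsewhere in this diff, while the client setup and error handling are assumptions:

```go
package example

import (
    "context"
    "fmt"

    "github.com/filecoin-project/lotus/api"
)

// printChainID reports the EIP-155 chain ID the node advertises, which ETH
// tooling needs when signing transactions for the Filecoin EVM runtime.
func printChainID(ctx context.Context, fn api.FullNode) error {
    params, err := fn.StateGetNetworkParams(ctx)
    if err != nil {
        return err
    }
    fmt.Println("eip155 chain id:", params.Eip155ChainID)
    return nil
}
```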
@@ -9,6 +9,7 @@ import (
    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/builtin/v9/miner"
    verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
    "github.com/filecoin-project/go-state-types/dline"
    abinetwork "github.com/filecoin-project/go-state-types/network"

@@ -61,6 +62,11 @@ type Gateway interface {
    StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
    StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
    StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
    StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
    StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error)
    StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error)
    StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error)
    StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error)
    StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
    StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
    StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
@ -479,6 +479,16 @@ type GatewayMethods struct {
|
||||
|
||||
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) ``
|
||||
|
||||
StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) ``
|
||||
|
||||
StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) ``
|
||||
|
||||
StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) ``
|
||||
|
||||
StateGetClaim func(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) ``
|
||||
|
||||
StateGetClaims func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) ``
|
||||
|
||||
StateGetReceipt func(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) ``
|
||||
|
||||
StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) ``
|
||||
@ -2851,6 +2861,61 @@ func (s *GatewayStub) StateGetActor(p0 context.Context, p1 address.Address, p2 t
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
if s.Internal.StateGetAllocation == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetAllocation(p0, p1, p2, p3)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
if s.Internal.StateGetAllocationForPendingDeal == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetAllocationForPendingDeal(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
|
||||
if s.Internal.StateGetAllocations == nil {
|
||||
return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetAllocations(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
|
||||
return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
|
||||
if s.Internal.StateGetClaim == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetClaim(p0, p1, p2, p3)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
|
||||
if s.Internal.StateGetClaims == nil {
|
||||
return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetClaims(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *GatewayStub) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
|
||||
return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *GatewayStruct) StateGetReceipt(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) {
|
||||
if s.Internal.StateGetReceipt == nil {
|
||||
return nil, ErrNotSupported
|
||||
|
@@ -182,7 +182,6 @@ type SplitStore struct {

    compactionIndex int64
    pruneIndex      int64
    onlineGCCnt     int64

    ctx    context.Context
    cancel func()
BIN build/actors/v12.tar.zst Normal file (binary file not shown)
@ -71,9 +71,32 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"system": MustParseCid("bafk2bzacebojf25kc5yo7gskdbdgg5f52oppej2jp6nknzlvrww4ue5vkddd2"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceavue3zekq4wmvttck2vgxlcensrsgh5niu5qhna2owejycorftcc"),
|
||||
},
|
||||
}, {
|
||||
Network: "butterflynet",
|
||||
Version: 11,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzaceaiy4dsxxus5xp5n5i4tjzkb7sc54mjz7qnk2efhgmsrobjesxnza"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacecfdqb7p3jakhaa3cqnzpt7hxmhghrbxvafsylqno3febx55fnidw"),
|
||||
"cron": MustParseCid("bafk2bzaceavmqu2qihgbe3xdaotgypuzvdpiifnm7ll6rolks2u4lac6voosk"),
|
||||
"datacap": MustParseCid("bafk2bzacealtvh65rzb34fmyzw4m2np2htnio4w3pn4alzqovwxkdbf23dvpo"),
|
||||
"eam": MustParseCid("bafk2bzacedko6hcjmwpuwgma5pb4gr2wgyvregk3nqqjxit7dv4es6vh5cjoc"),
|
||||
"ethaccount": MustParseCid("bafk2bzacedhcei2xnr34poxr4xziypm2obqlibke4cs2cjfnr3sz6nf6h7fyy"),
|
||||
"evm": MustParseCid("bafk2bzacebn5lwxboiikhz67ajwa34v2lc4qevnhpwdnipbmrnutkvrrqkb46"),
|
||||
"init": MustParseCid("bafk2bzacea6vw4esh5tg7mprv5jkbx5xcyilcy4vvf64lss32mjyuvv2mh5ng"),
|
||||
"multisig": MustParseCid("bafk2bzacedq2afnwcfipay5twv5mgzjoio5bbjvyo4yqchdwqcr7wrareyx54"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacebbsvr7i7mqmaadyjibe5wxnv7bwvvec2wlgknuwda6ep45amnd5w"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzaceafuh6idvaqqkj353vs4qdl42tcmvnymewu5zf4rq2nruxdyunses"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceb7bx4honi3byjllpdk6fea32dpu3vqvil3okodybdk5m3erlnwjw"),
|
||||
"storageminer": MustParseCid("bafk2bzacebxjhofdr3sb2uhy2ky2vcijh4nhmwkh5xijtbgk6dzkknji2kn7a"),
|
||||
"storagepower": MustParseCid("bafk2bzaceabskmmkas6njbowols7t4ib3bipa5abpomk3jtgfwojtzd7mjzfm"),
|
||||
"system": MustParseCid("bafk2bzacedtuh7cht3fud7fb4avl4g2zbz57lc4ohiaufpaex6dkmdokn5rgo"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceb37hxeuoo5rgf6ansrdl2ykm5v5zp6kireubn4orcopr67jbxv6k"),
|
||||
},
|
||||
}, {
|
||||
Network: "butterflynet",
|
||||
Version: 11,
|
||||
Version: 12,
|
||||
BundleGitTag: "v11.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzaceaiy4dsxxus5xp5n5i4tjzkb7sc54mjz7qnk2efhgmsrobjesxnza"),
|
||||
Actors: map[string]cid.Cid{
|
||||
@ -155,10 +178,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"verifiedregistry": MustParseCid("bafk2bzacec67wuchq64k7kgrujguukjvdlsl24pgighqdx5vgjhyk6bycrwnc"),
|
||||
},
|
||||
}, {
|
||||
Network: "calibrationnet",
|
||||
Version: 11,
|
||||
BundleGitTag: "v11.0.0-rc2",
|
||||
ManifestCid: MustParseCid("bafy2bzacedhuowetjy2h4cxnijz2l64h4mzpk5m256oywp4evarpono3cjhco"),
|
||||
Network: "calibrationnet",
|
||||
Version: 11,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacedhuowetjy2h4cxnijz2l64h4mzpk5m256oywp4evarpono3cjhco"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacebor5mnjnsav34cmm5pcd3dy4wubbv4wtcrvba7depy3sct7ie4sy"),
|
||||
"cron": MustParseCid("bafk2bzacebetehhedh55alfn4rcx2mhjhvuiustxlhtxc3drkemnpttws5eqw"),
|
||||
@ -177,6 +200,29 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"system": MustParseCid("bafk2bzacedqvik2n3phnj3cni3h2k5mtvz43nyq7mdmv7k7euejysvajywdug"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceceoo5jlom2zweh7kpye2vkj33wgqnkjshlsw2neemqkfg5g2rmvg"),
|
||||
},
|
||||
}, {
|
||||
Network: "calibrationnet",
|
||||
Version: 12,
|
||||
BundleGitTag: "v11.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzacec5fl7crmxyw234qsmijvffhssgqwuphyaejulbryhel2pxxrxgey"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacecrjovyiuh4jryepy4pxddzqjyrg2hfinxzbv37bpzlci54r5mkr6"),
|
||||
"cron": MustParseCid("bafk2bzacedy76woxmtalmsuaji4kog6wmq4h2kcgcyv5wpxbdz7f2ls2tjjmw"),
|
||||
"datacap": MustParseCid("bafk2bzacec2inqddxpfm3rufwqr752d2ok3ve4cxfhmloyosy6rj2krtkpwus"),
|
||||
"eam": MustParseCid("bafk2bzacea6sxno66egkqz5rqjq4e22obkeblxl7e3funjifljuinmrc2ztzg"),
|
||||
"ethaccount": MustParseCid("bafk2bzacecdsvs7xm3ncm66lsjqh65uzhr3rmu3dlux7qzdgpg737r4kslhxm"),
|
||||
"evm": MustParseCid("bafk2bzaceaz3b66m2znt27clmbp2zi5jsobw6g2x6fiezynyijgtkehgqhq3a"),
|
||||
"init": MustParseCid("bafk2bzacecdrw7uedx456hnowtyyhm63mkekdlkh3vmlhvqlya6pn6pokiq5y"),
|
||||
"multisig": MustParseCid("bafk2bzaceaxyxvmng5cel5huzy5nezscm34s7wuzn2fhndexurs3xjtp7xg5i"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacedrmyc4c6u6ipdo7hwaagx3urr47r4pw6lwv257wqbj6roumwfvky"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzacecq3bhrkatwash5zhy2275ksaj3criqb6rox5e3hsyvz7mrl2jh3o"),
|
||||
"storagemarket": MustParseCid("bafk2bzacedswrpkbh7jmttskllbblym7oj2ynxp7bxtj2fpbxsx55mraog6sc"),
|
||||
"storageminer": MustParseCid("bafk2bzacecki6ckm7gf4uje3fxvw6x5f77ukaqctvcsfha6oaecvl67veh3sg"),
|
||||
"storagepower": MustParseCid("bafk2bzacecjcvxwibkgpufeah33gfd2jzlqjx5rn2pguvvch2squon23u6kne"),
|
||||
"system": MustParseCid("bafk2bzaceavvlgqbcwhy3c24aa24z23wcbzggmb66gj7x5if7o3fbvddaocc4"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacedmxdtnpy2mc63b6bi2h4vp4dfc6hxjckqnwaxyijgkpmangygcmk"),
|
||||
},
|
||||
}, {
|
||||
Network: "caterpillarnet",
|
||||
Version: 8,
|
||||
@ -246,9 +292,32 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"system": MustParseCid("bafk2bzacecfivztuulqqv4o5oyvvvrkblwix4hqt24pqru6ivnpioefhuhria"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacecdhw6x7dfrxfysmn6tdbn2ny464omgqppxhjuawxauscidppd7pc"),
|
||||
},
|
||||
}, {
|
||||
Network: "caterpillarnet",
|
||||
Version: 11,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacebexc2jgzwr5ngn6jdnkwdqwwmcapajuypdgvopoe6bnvp4yxm4o2"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceanjiq5m3feytue5m7hhxfkob2ofg2greoct5tr77reuhrjglo66g"),
|
||||
"cron": MustParseCid("bafk2bzaceavgd5qj6n744tukhdrvxejygzs3jnlizmcvjsdnxkgiimrd5jrys"),
|
||||
"datacap": MustParseCid("bafk2bzacedmdywxwrzop2gmf4ys5stydlmvbe35j3nyr2efmf273briksuvse"),
|
||||
"eam": MustParseCid("bafk2bzacec7qo7s72li7tqysllstlrxxm2dhfqv2w32pytel2e775cki4ozqm"),
|
||||
"ethaccount": MustParseCid("bafk2bzaceaygtkliu26ubb7ivljrvaeesp5sbjlis5okzl35ishxioa2tlx4w"),
|
||||
"evm": MustParseCid("bafk2bzacebo7iqzy2ophz4f3civzwlltec7q5fut7kmtfckr6vy33r6ic5eqe"),
|
||||
"init": MustParseCid("bafk2bzaceb7uzzlsquqwrqhb2vpbvk3jgr4wp5i3smu2splnag2v5sppdehns"),
|
||||
"multisig": MustParseCid("bafk2bzacebwibfqrytobl4pjtny244zkmfoomazbap3r5gddjryckx5js4csi"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacecuaa5esuxpouigxoamyl5gire2qqqhvyhewsig2x2j73f6ksh7go"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzaced4xxqhv63njf2ibvsqshlwikafctxev7aho5lgsfxyt2javjwvtw"),
|
||||
"storagemarket": MustParseCid("bafk2bzacedwtx3xokqmbgkgkoqkdt6lam4ymdjb3eznlbtec5wcrtx74l2bpc"),
|
||||
"storageminer": MustParseCid("bafk2bzacebbbe4sdo3xxkez7x7lkl6j46w34vx7eg7xswmdzhp7moa44p3wjg"),
|
||||
"storagepower": MustParseCid("bafk2bzacedfgz6n24tjsor4pcayomim2f5f3a3fgyatmjgwxxeejna7okndda"),
|
||||
"system": MustParseCid("bafk2bzacebxfzeom3d7ahcz2n2nlwp7ncv767bdbbrisugks4l6v7lcu2tmyg"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacedaws3or3twy45ltcxucgvqijsje4x675ph6vup2w35smlfneamno"),
|
||||
},
|
||||
}, {
|
||||
Network: "caterpillarnet",
|
||||
Version: 11,
|
||||
Version: 12,
|
||||
BundleGitTag: "v11.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzacebexc2jgzwr5ngn6jdnkwdqwwmcapajuypdgvopoe6bnvp4yxm4o2"),
|
||||
Actors: map[string]cid.Cid{
|
||||
@ -329,9 +398,32 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"system": MustParseCid("bafk2bzaceairk5qz5hyzt4yyaxa356aszyifswiust5ilxizwxujcmtzvjzoa"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaced2mkyqobpgna5jevosym3adv2bvraggigyz2jgn5cxymirxj4x3i"),
|
||||
},
|
||||
}, {
|
||||
Network: "devnet",
|
||||
Version: 11,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzaceay35go4xbjb45km6o46e5bib3bi46panhovcbedrynzwmm3drr4i"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacecf2pprkbdlpm4e2xz3ufunxtgrgyh2ie3stuqiyhibsvdze7kvri"),
|
||||
"cron": MustParseCid("bafk2bzaceasr5d2skowvzv5mzsyak6waqrgc46ewj6rzbapkfi5woom6n6bwa"),
|
||||
"datacap": MustParseCid("bafk2bzaceaqd77gptubupda7rp7daxkxbkzwc253dxhiyoezxvj2tljmkgpny"),
|
||||
"eam": MustParseCid("bafk2bzacedve6p4ye6zxydjbfs4ode5r2equ7rqzpyltujsq2lu6wyxnijfx4"),
|
||||
"ethaccount": MustParseCid("bafk2bzacea25xfsxwew3h2crer6jlb4c5vwu2gtch2jh73ocuxjhupenyrugy"),
|
||||
"evm": MustParseCid("bafk2bzacece5hivtkmi757lyfahgti7xuqgofodb2u65pxgf6oizfwiiwlcsi"),
|
||||
"init": MustParseCid("bafk2bzacecxnr5y7qifzdqqiwfbjxv2yr7lbkcyu3e2mf5zjdncteupxdlquu"),
|
||||
"multisig": MustParseCid("bafk2bzaceayap4k4u3lbysaeeixct5fvhmafy3fa5eagvdpk3i4a7ubfdpobe"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceafgrz5wepbein35gie7rnsu7zttxvgllgdneuefmmy4j5izydtza"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzacedwbtfqlx47fdkxjrb5mwiatheci44x3zkpx33smybc2cme23ymuo"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceaj74fmooaf3gj3ebwon64ky7hhdh7kytdr3agclqfrqzmpzykh7g"),
|
||||
"storageminer": MustParseCid("bafk2bzacedb7bokkzzs7hnbhivp74pgcpermuy7j6b3ncodylksukkxtnn7ze"),
|
||||
"storagepower": MustParseCid("bafk2bzacedilnkegizkxz3nuutib4d4wwlk4bkla22loepia2h53yf4hysmq6"),
|
||||
"system": MustParseCid("bafk2bzacedpyoncjbl4oxkjm5e77ngvpy2xfajjc4myfsv2vltvzxioattlu2"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacebdqi5tr5pjnem5nylg2zbqcugvi7oxi35bhnrfudx4y4ufhlit2k"),
|
||||
},
|
||||
}, {
|
||||
Network: "devnet",
|
||||
Version: 11,
|
||||
Version: 12,
|
||||
BundleGitTag: "v11.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzaceay35go4xbjb45km6o46e5bib3bi46panhovcbedrynzwmm3drr4i"),
|
||||
Actors: map[string]cid.Cid{
|
||||
@ -435,9 +527,32 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"system": MustParseCid("bafk2bzacedakk5nofebyup4m7nvx6djksfwhnxzrfuq4oyemhpl4lllaikr64"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacedfel6edzqpe5oujno7fog4i526go4dtcs6vwrdtbpy2xq6htvcg6"),
|
||||
},
|
||||
}, {
|
||||
Network: "mainnet",
|
||||
Version: 11,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacecnhaiwcrpyjvzl4uv4q3jzoif26okl3m66q3cijp3dfwlcxwztwo"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacealnlr7st6lkwoh6wxpf2hnrlex5sknaopgmkr2tuhg7vmbfy45so"),
|
||||
"cron": MustParseCid("bafk2bzacebpewdvvgt6tk2o2u4rcovdgym67tadiis5usemlbejg7k3kt567o"),
|
||||
"datacap": MustParseCid("bafk2bzacebslykoyrb2hm7aacjngqgd5n2wmeii2goadrs5zaya3pvdf6pdnq"),
|
||||
"eam": MustParseCid("bafk2bzaceaelwt4yfsfvsu3pa3miwalsvy3cfkcjvmt4sqoeopsppnrmj2mf2"),
|
||||
"ethaccount": MustParseCid("bafk2bzaceclkmc4yidxc6lgcjpfypbde2eddnevcveo4j5kmh4ek6inqysz2k"),
|
||||
"evm": MustParseCid("bafk2bzacediwh6etwzwmb5pivtclpdplewdjzphouwqpppce6opisjv2fjqfe"),
|
||||
"init": MustParseCid("bafk2bzaceckwf3w6n2nw6eh77ktmsxqgsvshonvgnyk5q5syyngtetxvasfxg"),
|
||||
"multisig": MustParseCid("bafk2bzaceafajceqwg5ybiz7xw6rxammuirkgtuv625gzaehsqfprm4bazjmk"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceb4e6cnsnviegmqvsmoxzncruvhra54piq7bwiqfqevle6oob2gvo"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzacebwjw2vxkobs7r2kwjdqqb42h2kucyuk6flbnyzw4odg5s4mogamo"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceazu2j2zu4p24tr22btnqzkhzjvyjltlvsagaj6w3syevikeb5d7m"),
|
||||
"storageminer": MustParseCid("bafk2bzacec24okjqrp7c7rj3hbrs5ez5apvwah2ruka6haesgfngf37mhk6us"),
|
||||
"storagepower": MustParseCid("bafk2bzaceaxgloxuzg35vu7l7tohdgaq2frsfp4ejmuo7tkoxjp5zqrze6sf4"),
|
||||
"system": MustParseCid("bafk2bzaced7npe5mt5nh72jxr2igi2sofoa7gedt4w6kueeke7i3xxugqpjfm"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacedej3dnr62g2je2abmyjg3xqv4otvh6e26du5fcrhvw7zgcaaez3a"),
|
||||
},
|
||||
}, {
|
||||
Network: "mainnet",
|
||||
Version: 11,
|
||||
Version: 12,
|
||||
BundleGitTag: "v11.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzacecnhaiwcrpyjvzl4uv4q3jzoif26okl3m66q3cijp3dfwlcxwztwo"),
|
||||
Actors: map[string]cid.Cid{
|
||||
@ -518,9 +633,32 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"system": MustParseCid("bafk2bzacecf2jimdz7knhngs64ximfz3eaud6s3kiunmkybgrkupdjyo2dw7o"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacecdmek2htsgcyoyl35glakyab66cojqo2y335njnm7krleb6yfbps"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing",
|
||||
Version: 11,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacea2vxre32tg3xhpejrktiuzx4d3pcoe7yyazgscfibmegmchr6n42"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceccerssb3tgel6ukdghlwvs7dxsolj4fpkgn7dh7owzwapqb6ejpw"),
|
||||
"cron": MustParseCid("bafk2bzacebtfl6fczxnitrqqjhyefskf3asyn3gzuvqcddieoqfsaddk5fd4q"),
|
||||
"datacap": MustParseCid("bafk2bzacediikc55y6uzmnhucf4mik6rqpjulwvgp5gdibogxjhgbvusmzff2"),
|
||||
"eam": MustParseCid("bafk2bzaceazqi5ezossp6kvqogaaba6hxlfarqgniktmb7iy5qonha3eflz6m"),
|
||||
"ethaccount": MustParseCid("bafk2bzaceb77ospgfqqmf67v23wkyeg7lr2mu53ybaacu3bslx7s7nhttdueo"),
|
||||
"evm": MustParseCid("bafk2bzacedvgt7mv22hux4vrnklylq7qmw43kfrqwam6wdsfzkdnaewr33qbu"),
|
||||
"init": MustParseCid("bafk2bzacealzb3nk2oypway5ubz3hs5py5ok5tuw545454vg4d3mwbslef4js"),
|
||||
"multisig": MustParseCid("bafk2bzacec45ppn4hrwizmopp2v2atkxw35tb6yem6uqhqilrv7aiaknnnxmu"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceajbr3t6cngzh3katqteflbcrtwtdgbthnlfemon5tg6rytf2uonw"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzacea7ycf53kbq4robcuh3ziy7qwwhaqamc5krn3lugypgpxhlewdaiq"),
|
||||
"storagemarket": MustParseCid("bafk2bzacedskmbcpaeb6bezs32szh52jrukvihobluadauayroo5gzrt32tkm"),
|
||||
"storageminer": MustParseCid("bafk2bzaced3yg5lctmswnbkxyd6cleg3llyux7fu2vbddyd2ho36fpym423mq"),
|
||||
"storagepower": MustParseCid("bafk2bzacebvpdf372fzxgixztbz2r7ayxyvx7jmdxwlfuqt2cq7tnqgie3klw"),
|
||||
"system": MustParseCid("bafk2bzaceaatvscbnkv36ixhtt2zel4er5oskxevgumh5gegqkv7uzah36f24"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing",
|
||||
Version: 11,
|
||||
Version: 12,
|
||||
BundleGitTag: "v11.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzacea2vxre32tg3xhpejrktiuzx4d3pcoe7yyazgscfibmegmchr6n42"),
|
||||
Actors: map[string]cid.Cid{
|
||||
@ -601,9 +739,32 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"system": MustParseCid("bafk2bzacecf2jimdz7knhngs64ximfz3eaud6s3kiunmkybgrkupdjyo2dw7o"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacecdmek2htsgcyoyl35glakyab66cojqo2y335njnm7krleb6yfbps"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing-fake-proofs",
|
||||
Version: 11,
|
||||
|
||||
ManifestCid: MustParseCid("bafy2bzacecojemqglhzzhjnhgtrcbsgkyv67ziytvtbhwlr4ym4oxqofv7zui"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceccerssb3tgel6ukdghlwvs7dxsolj4fpkgn7dh7owzwapqb6ejpw"),
|
||||
"cron": MustParseCid("bafk2bzacebtfl6fczxnitrqqjhyefskf3asyn3gzuvqcddieoqfsaddk5fd4q"),
|
||||
"datacap": MustParseCid("bafk2bzacediikc55y6uzmnhucf4mik6rqpjulwvgp5gdibogxjhgbvusmzff2"),
|
||||
"eam": MustParseCid("bafk2bzaceazqi5ezossp6kvqogaaba6hxlfarqgniktmb7iy5qonha3eflz6m"),
|
||||
"ethaccount": MustParseCid("bafk2bzaceb77ospgfqqmf67v23wkyeg7lr2mu53ybaacu3bslx7s7nhttdueo"),
|
||||
"evm": MustParseCid("bafk2bzacedvgt7mv22hux4vrnklylq7qmw43kfrqwam6wdsfzkdnaewr33qbu"),
|
||||
"init": MustParseCid("bafk2bzacealzb3nk2oypway5ubz3hs5py5ok5tuw545454vg4d3mwbslef4js"),
|
||||
"multisig": MustParseCid("bafk2bzacec45ppn4hrwizmopp2v2atkxw35tb6yem6uqhqilrv7aiaknnnxmu"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceajbr3t6cngzh3katqteflbcrtwtdgbthnlfemon5tg6rytf2uonw"),
|
||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
||||
"reward": MustParseCid("bafk2bzacea7ycf53kbq4robcuh3ziy7qwwhaqamc5krn3lugypgpxhlewdaiq"),
|
||||
"storagemarket": MustParseCid("bafk2bzacedskmbcpaeb6bezs32szh52jrukvihobluadauayroo5gzrt32tkm"),
|
||||
"storageminer": MustParseCid("bafk2bzacebqeztpa5exztccqjwqhan5droiy7ga6zekm6f2gzxoe655vneczm"),
|
||||
"storagepower": MustParseCid("bafk2bzaceb2tlyuwxncdxsh3hc4fwcjnpxaijkiv54ustwdjbrqabxdsc27km"),
|
||||
"system": MustParseCid("bafk2bzaceaatvscbnkv36ixhtt2zel4er5oskxevgumh5gegqkv7uzah36f24"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing-fake-proofs",
|
||||
Version: 11,
|
||||
Version: 12,
|
||||
BundleGitTag: "v11.0.0",
|
||||
ManifestCid: MustParseCid("bafy2bzacecojemqglhzzhjnhgtrcbsgkyv67ziytvtbhwlr4ym4oxqofv7zui"),
|
||||
Actors: map[string]cid.Cid{
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -23,7 +23,7 @@ var NetworkBundle = "devnet"
var BundleOverrides map[actorstypes.Version]string
var ActorDebugging = true

const GenesisNetworkVersion = network.Version18
const GenesisNetworkVersion = network.Version20

var UpgradeBreezeHeight = abi.ChainEpoch(-1)
@ -61,9 +61,11 @@ var UpgradeSharkHeight = abi.ChainEpoch(-20)
var UpgradeHyggeHeight = abi.ChainEpoch(-21)

var UpgradeLightningHeight = abi.ChainEpoch(30)
var UpgradeLightningHeight = abi.ChainEpoch(-22)

var UpgradeThunderHeight = abi.ChainEpoch(1000)
var UpgradeThunderHeight = abi.ChainEpoch(-23)

var UpgradeWatermelonHeight = abi.ChainEpoch(200)

var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
	0: DrandMainnet,
@ -120,6 +122,7 @@ func init() {
	UpgradeHyggeHeight = getUpgradeHeight("LOTUS_HYGGE_HEIGHT", UpgradeHyggeHeight)
	UpgradeLightningHeight = getUpgradeHeight("LOTUS_LIGHTNING_HEIGHT", UpgradeLightningHeight)
	UpgradeThunderHeight = getUpgradeHeight("LOTUS_THUNDER_HEIGHT", UpgradeThunderHeight)
	UpgradeWatermelonHeight = getUpgradeHeight("LOTUS_WATERMELON_HEIGHT", UpgradeWatermelonHeight)

	BuildType |= Build2k
@ -129,6 +132,8 @@ const BlockDelaySecs = uint64(4)
const PropagationDelaySecs = uint64(1)

var EquivocationDelaySecs = uint64(0)

// SlashablePowerDelay is the number of epochs after ElectionPeriodStart, after
// which the miner is slashed
//
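Note (illustrative, not part of this commit): the init() hunk above routes the new UpgradeWatermelonHeight through getUpgradeHeight("LOTUS_WATERMELON_HEIGHT", ...), whose body is not shown in this diff. The sketch below is a hypothetical stand-in helper that demonstrates the same pattern of letting an environment variable override a default devnet upgrade epoch; the name envEpochOverride is invented for the example.

package build

import (
	"os"
	"strconv"

	"github.com/filecoin-project/go-state-types/abi"
)

// envEpochOverride is an assumed, simplified analogue of the getUpgradeHeight
// helper referenced above: it returns the epoch from the named environment
// variable if it parses, and the compiled-in default otherwise.
func envEpochOverride(name string, def abi.ChainEpoch) abi.ChainEpoch {
	v, ok := os.LookupEnv(name)
	if !ok {
		return def
	}
	n, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		// Fall back to the default on a malformed value.
		return def
	}
	return abi.ChainEpoch(n)
}

A caller would then write something like UpgradeWatermelonHeight = envEpochOverride("LOTUS_WATERMELON_HEIGHT", UpgradeWatermelonHeight), mirroring the lines added in this hunk.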
@ -57,6 +57,9 @@ const UpgradeLightningHeight = 50
const UpgradeThunderHeight = UpgradeLightningHeight + 360

// ??????????
const UpgradeWatermelonHeight = 999999999999999

var SupportedProofTypes = []abi.RegisteredSealProof{
	abi.RegisteredSealProof_StackedDrg512MiBV1,
	abi.RegisteredSealProof_StackedDrg32GiBV1,
@ -83,6 +86,8 @@ const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
const PropagationDelaySecs = uint64(6)

var EquivocationDelaySecs = uint64(2)

// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 2
@ -79,6 +79,9 @@ const UpgradeLightningHeight = 489094
// 2023-04-21T16:00:00Z
const UpgradeThunderHeight = UpgradeLightningHeight + 3120

// ??????????
const UpgradeWatermelonHeight = 999999999999999

var SupportedProofTypes = []abi.RegisteredSealProof{
	abi.RegisteredSealProof_StackedDrg32GiBV1,
	abi.RegisteredSealProof_StackedDrg64GiBV1,
@ -120,6 +123,8 @@ const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
var PropagationDelaySecs = uint64(10)

var EquivocationDelaySecs = uint64(2)

// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 4
@ -52,8 +52,9 @@ var UpgradeSkyrHeight = abi.ChainEpoch(-19)
var UpgradeSharkHeight = abi.ChainEpoch(-20)
var UpgradeHyggeHeight = abi.ChainEpoch(-21)
var UpgradeLightningHeight = abi.ChainEpoch(-22)
var UpgradeThunderHeight = abi.ChainEpoch(-23)

const UpgradeThunderHeight = 50
const UpgradeWatermelonHeight = 50

var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
	0: DrandMainnet,
@ -120,6 +121,8 @@ const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
const PropagationDelaySecs = uint64(6)

var EquivocationDelaySecs = uint64(2)

// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 2
@ -90,10 +90,13 @@ const UpgradeSharkHeight = 2383680
const UpgradeHyggeHeight = 2683348

// 2023-04-27T13:00:00Z
var UpgradeLightningHeight = abi.ChainEpoch(2809800)
const UpgradeLightningHeight = 2809800

// 2023-05-18T13:00:00Z
var UpgradeThunderHeight = UpgradeLightningHeight + 2880*21
const UpgradeThunderHeight = UpgradeLightningHeight + 2880*21

// ???????
var UpgradeWatermelonHeight = abi.ChainEpoch(9999999999)

var SupportedProofTypes = []abi.RegisteredSealProof{
	abi.RegisteredSealProof_StackedDrg32GiBV1,
@ -103,17 +106,15 @@ var ConsensusMinerMinPower = abi.NewStoragePower(10 << 40)
var PreCommitChallengeDelay = abi.ChainEpoch(150)
var PropagationDelaySecs = uint64(10)

var EquivocationDelaySecs = uint64(2)

func init() {
	if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
		SetAddressNetwork(address.Mainnet)
	}

	if os.Getenv("LOTUS_DISABLE_LIGHTNING") == "1" {
		UpgradeLightningHeight = math.MaxInt64
	}

	if os.Getenv("LOTUS_DISABLE_THUNDER") == "1" {
		UpgradeThunderHeight = math.MaxInt64
	if os.Getenv("LOTUS_DISABLE_WATERMELON") == "1" {
		UpgradeWatermelonHeight = math.MaxInt64
	}

// NOTE: DO NOT change this unless you REALLY know what you're doing. This is not consensus critical, however,
@ -30,7 +30,7 @@ const AllowableClockDriftSecs = uint64(1)
/* inline-gen template
const TestNetworkVersion = network.Version{{.latestNetworkVersion}}
/* inline-gen start */

const TestNetworkVersion = network.Version20
const TestNetworkVersion = network.Version21

/* inline-gen end */
@ -9,7 +9,6 @@ package build
import (
	"math/big"
	"time"

	"github.com/ipfs/go-cid"
@ -34,6 +33,7 @@ var (
	MinimumBaseFee = int64(100)
	BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
	PropagationDelaySecs = uint64(6)
	EquivocationDelaySecs = uint64(2)
	SupportedProofTypes = []abi.RegisteredSealProof{
		abi.RegisteredSealProof_StackedDrg32GiBV1,
		abi.RegisteredSealProof_StackedDrg64GiBV1,
@ -109,6 +109,7 @@ var (
	UpgradeHyggeHeight abi.ChainEpoch = -20
	UpgradeLightningHeight abi.ChainEpoch = -21
	UpgradeThunderHeight abi.ChainEpoch = -22
	UpgradeWatermelonHeight abi.ChainEpoch = -23

	DrandSchedule = map[abi.ChainEpoch]DrandEnum{
		0: DrandMainnet,
@ -138,7 +139,3 @@ const BootstrapPeerThreshold = 1
// ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint.
// As per https://github.com/ethereum-lists/chains
const Eip155ChainId = 31415926

// Reducing the delivery delay for equivocation of
// consistent broadcast to just half a second.
var CBDeliveryDelay = 500 * time.Millisecond
@ -37,7 +37,7 @@ func BuildTypeString() string {
}

// BuildVersion is the local build version
const BuildVersion = "1.23.3-dev"
const BuildVersion = "1.23.5-dev"

func UserVersion() string {
	if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
@ -6,7 +6,7 @@ import (
	"github.com/filecoin-project/go-address"
	actorstypes "github.com/filecoin-project/go-state-types/actors"
	builtin11 "github.com/filecoin-project/go-state-types/builtin"
	builtin12 "github.com/filecoin-project/go-state-types/builtin"
	"github.com/filecoin-project/go-state-types/cbor"
	"github.com/filecoin-project/go-state-types/manifest"
	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@ -22,7 +22,7 @@ import (
	"github.com/filecoin-project/lotus/chain/types"
)

var Methods = builtin11.MethodsAccount
var Methods = builtin12.MethodsAccount

func Load(store adt.Store, act *types.Actor) (State, error) {
	if name, av, ok := actors.GetActorMetaByCode(act.Code); ok {
@ -44,6 +44,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
	case actorstypes.Version11:
		return load11(store, act.Head)

	case actorstypes.Version12:
		return load12(store, act.Head)

	}
}
@ -111,6 +114,9 @@ func MakeState(store adt.Store, av actorstypes.Version, addr address.Address) (S
	case actorstypes.Version11:
		return make11(store, addr)

	case actorstypes.Version12:
		return make12(store, addr)

	}
	return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -139,5 +145,6 @@ func AllCodes() []cid.Cid {
	(&state9{}).Code(),
	(&state10{}).Code(),
	(&state11{}).Code(),
	(&state12{}).Code(),
	}
}
chain/actors/builtin/account/v12.go (62 lines, generated, new file)
@ -0,0 +1,62 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
account12 "github.com/filecoin-project/go-state-types/builtin/v12/account"
|
||||
"github.com/filecoin-project/go-state-types/manifest"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state12)(nil)
|
||||
|
||||
func load12(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state12{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make12(store adt.Store, addr address.Address) (State, error) {
|
||||
out := state12{store: store}
|
||||
out.State = account12.State{Address: addr}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state12 struct {
|
||||
account12.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state12) PubkeyAddress() (address.Address, error) {
|
||||
return s.Address, nil
|
||||
}
|
||||
|
||||
func (s *state12) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state12) ActorKey() string {
|
||||
return manifest.AccountKey
|
||||
}
|
||||
|
||||
func (s *state12) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version12
|
||||
}
|
||||
|
||||
func (s *state12) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
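Note (illustrative, not part of this commit): with the new load12/make12 paths above, callers keep using the version-dispatching account.Load helper and get a v12-backed state wrapper transparently. A minimal usage sketch, assuming the store and actor values come from existing chain-state plumbing:

package example

import (
	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/account"
	"github.com/filecoin-project/lotus/chain/types"
)

// accountPubkey resolves an account actor's public-key address. Load switches
// on act.Code and returns the matching state wrapper (state0 .. state12);
// v12 heads go through the new load12 path added in this diff.
func accountPubkey(store adt.Store, act *types.Actor) (address.Address, error) {
	st, err := account.Load(store, act)
	if err != nil {
		return address.Undef, err
	}
	return st.PubkeyAddress()
}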
@ -5,7 +5,7 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
builtin11 "github.com/filecoin-project/go-state-types/builtin"
|
||||
builtin12 "github.com/filecoin-project/go-state-types/builtin"
|
||||
"github.com/filecoin-project/go-state-types/manifest"
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
@ -40,6 +40,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
case actorstypes.Version11:
|
||||
return load11(store, act.Head)
|
||||
|
||||
case actorstypes.Version12:
|
||||
return load12(store, act.Head)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@ -107,13 +110,16 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) {
|
||||
case actorstypes.Version11:
|
||||
return make11(store)
|
||||
|
||||
case actorstypes.Version12:
|
||||
return make12(store)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor version %d", av)
|
||||
}
|
||||
|
||||
var (
|
||||
Address = builtin11.CronActorAddr
|
||||
Methods = builtin11.MethodsCron
|
||||
Address = builtin12.CronActorAddr
|
||||
Methods = builtin12.MethodsCron
|
||||
)
|
||||
|
||||
type State interface {
|
||||
@ -137,5 +143,6 @@ func AllCodes() []cid.Cid {
|
||||
(&state9{}).Code(),
|
||||
(&state10{}).Code(),
|
||||
(&state11{}).Code(),
|
||||
(&state12{}).Code(),
|
||||
}
|
||||
}
chain/actors/builtin/cron/v12.go (57 lines, generated, new file)
@ -0,0 +1,57 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
cron12 "github.com/filecoin-project/go-state-types/builtin/v12/cron"
|
||||
"github.com/filecoin-project/go-state-types/manifest"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state12)(nil)
|
||||
|
||||
func load12(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state12{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make12(store adt.Store) (State, error) {
|
||||
out := state12{store: store}
|
||||
out.State = *cron12.ConstructState(cron12.BuiltInEntries())
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state12 struct {
|
||||
cron12.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state12) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state12) ActorKey() string {
|
||||
return manifest.CronKey
|
||||
}
|
||||
|
||||
func (s *state12) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version12
|
||||
}
|
||||
|
||||
func (s *state12) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
@ -7,7 +7,7 @@ import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
builtin11 "github.com/filecoin-project/go-state-types/builtin"
|
||||
builtin12 "github.com/filecoin-project/go-state-types/builtin"
|
||||
"github.com/filecoin-project/go-state-types/cbor"
|
||||
"github.com/filecoin-project/go-state-types/manifest"
|
||||
|
||||
@ -17,8 +17,8 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
Address = builtin11.DatacapActorAddr
|
||||
Methods = builtin11.MethodsDatacap
|
||||
Address = builtin12.DatacapActorAddr
|
||||
Methods = builtin12.MethodsDatacap
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
@ -38,6 +38,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
case actorstypes.Version11:
|
||||
return load11(store, act.Head)
|
||||
|
||||
case actorstypes.Version12:
|
||||
return load12(store, act.Head)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@ -56,6 +59,9 @@ func MakeState(store adt.Store, av actorstypes.Version, governor address.Address
|
||||
case actorstypes.Version11:
|
||||
return make11(store, governor, bitwidth)
|
||||
|
||||
case actorstypes.Version12:
|
||||
return make12(store, governor, bitwidth)
|
||||
|
||||
default:
|
||||
return nil, xerrors.Errorf("datacap actor only valid for actors v9 and above, got %d", av)
|
||||
}
|
||||
@ -79,5 +85,6 @@ func AllCodes() []cid.Cid {
|
||||
(&state9{}).Code(),
|
||||
(&state10{}).Code(),
|
||||
(&state11{}).Code(),
|
||||
(&state12{}).Code(),
|
||||
}
|
||||
}
chain/actors/builtin/datacap/v12.go (82 lines, generated, new file)
@ -0,0 +1,82 @@
|
||||
package datacap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
datacap12 "github.com/filecoin-project/go-state-types/builtin/v12/datacap"
|
||||
adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
|
||||
"github.com/filecoin-project/go-state-types/manifest"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state12)(nil)
|
||||
|
||||
func load12(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state12{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make12(store adt.Store, governor address.Address, bitwidth uint64) (State, error) {
|
||||
out := state12{store: store}
|
||||
s, err := datacap12.ConstructState(store, governor, bitwidth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out.State = *s
|
||||
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state12 struct {
|
||||
datacap12.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state12) Governor() (address.Address, error) {
|
||||
return s.State.Governor, nil
|
||||
}
|
||||
|
||||
func (s *state12) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state12) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
|
||||
return forEachClient(s.store, actors.Version12, s.verifiedClients, cb)
|
||||
}
|
||||
|
||||
func (s *state12) verifiedClients() (adt.Map, error) {
|
||||
return adt12.AsMap(s.store, s.Token.Balances, int(s.Token.HamtBitWidth))
|
||||
}
|
||||
|
||||
func (s *state12) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
|
||||
return getDataCap(s.store, actors.Version12, s.verifiedClients, addr)
|
||||
}
|
||||
|
||||
func (s *state12) ActorKey() string {
|
||||
return manifest.DatacapKey
|
||||
}
|
||||
|
||||
func (s *state12) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version12
|
||||
}
|
||||
|
||||
func (s *state12) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
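Note (illustrative, not part of this commit): the v12 datacap wrapper above backs ForEachClient with the v12 token-balances HAMT. A minimal sketch of walking verified-client balances through the version-dispatching datacap.Load helper, assuming store and actor values come from existing chain-state plumbing:

package example

import (
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/datacap"
	"github.com/filecoin-project/lotus/chain/types"
)

// printDatacapBalances lists every verified client's datacap balance.
func printDatacapBalances(store adt.Store, act *types.Actor) error {
	st, err := datacap.Load(store, act)
	if err != nil {
		return err
	}
	return st.ForEachClient(func(addr address.Address, dcap abi.StoragePower) error {
		fmt.Printf("%s: %s\n", addr, dcap)
		return nil
	})
}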
@ -5,7 +5,7 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
builtin11 "github.com/filecoin-project/go-state-types/builtin"
|
||||
builtin12 "github.com/filecoin-project/go-state-types/builtin"
|
||||
"github.com/filecoin-project/go-state-types/cbor"
|
||||
"github.com/filecoin-project/go-state-types/manifest"
|
||||
|
||||
@ -14,7 +14,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
var Methods = builtin11.MethodsEVM
|
||||
var Methods = builtin12.MethodsEVM
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
if name, av, ok := actors.GetActorMetaByCode(act.Code); ok {
|
||||
@ -30,6 +30,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
case actorstypes.Version11:
|
||||
return load11(store, act.Head)
|
||||
|
||||
case actorstypes.Version12:
|
||||
return load12(store, act.Head)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@ -45,6 +48,9 @@ func MakeState(store adt.Store, av actorstypes.Version, bytecode cid.Cid) (State
|
||||
case actorstypes.Version11:
|
||||
return make11(store, bytecode)
|
||||
|
||||
case actorstypes.Version12:
|
||||
return make12(store, bytecode)
|
||||
|
||||
default:
|
||||
return nil, xerrors.Errorf("evm actor only valid for actors v10 and above, got %d", av)
|
||||
}
chain/actors/builtin/evm/v12.go (72 lines, generated, new file)
@ -0,0 +1,72 @@
|
||||
package evm
|
||||
|
||||
import (
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
evm12 "github.com/filecoin-project/go-state-types/builtin/v12/evm"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state12)(nil)
|
||||
|
||||
func load12(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state12{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make12(store adt.Store, bytecode cid.Cid) (State, error) {
|
||||
out := state12{store: store}
|
||||
s, err := evm12.ConstructState(store, bytecode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out.State = *s
|
||||
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state12 struct {
|
||||
evm12.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state12) Nonce() (uint64, error) {
|
||||
return s.State.Nonce, nil
|
||||
}
|
||||
|
||||
func (s *state12) IsAlive() (bool, error) {
|
||||
return s.State.Tombstone == nil, nil
|
||||
}
|
||||
|
||||
func (s *state12) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state12) GetBytecodeCID() (cid.Cid, error) {
|
||||
return s.State.Bytecode, nil
|
||||
}
|
||||
|
||||
func (s *state12) GetBytecodeHash() ([32]byte, error) {
|
||||
return s.State.BytecodeHash, nil
|
||||
}
|
||||
|
||||
func (s *state12) GetBytecode() ([]byte, error) {
|
||||
bc, err := s.GetBytecodeCID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var byteCode abi.CborBytesTransparent
|
||||
if err := s.store.Get(s.store.Context(), bc, &byteCode); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return byteCode, nil
|
||||
}
|
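Note (illustrative, not part of this commit): the v12 EVM wrapper above exposes the same Nonce/IsAlive/GetBytecode accessors as earlier versions. A minimal sketch of reading a contract's bytecode through the version-dispatching evm.Load helper, assuming store and actor values come from existing chain-state plumbing:

package example

import (
	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/evm"
	"github.com/filecoin-project/lotus/chain/types"
)

// evmBytecode returns the raw bytecode of a live EVM actor, or nil if the
// contract has been tombstoned.
func evmBytecode(store adt.Store, act *types.Actor) ([]byte, error) {
	st, err := evm.Load(store, act)
	if err != nil {
		return nil, err
	}
	if alive, err := st.IsAlive(); err != nil {
		return nil, err
	} else if !alive {
		return nil, nil
	}
	// GetBytecode resolves the bytecode CID and fetches the raw bytes.
	return st.GetBytecode()
}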
@ -7,7 +7,7 @@ import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
builtin11 "github.com/filecoin-project/go-state-types/builtin"
|
||||
builtin12 "github.com/filecoin-project/go-state-types/builtin"
|
||||
"github.com/filecoin-project/go-state-types/cbor"
|
||||
"github.com/filecoin-project/go-state-types/manifest"
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
@ -25,8 +25,8 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
Address = builtin11.InitActorAddr
|
||||
Methods = builtin11.MethodsInit
|
||||
Address = builtin12.InitActorAddr
|
||||
Methods = builtin12.MethodsInit
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
@ -49,6 +49,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
case actorstypes.Version11:
|
||||
return load11(store, act.Head)
|
||||
|
||||
case actorstypes.Version12:
|
||||
return load12(store, act.Head)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@ -116,6 +119,9 @@ func MakeState(store adt.Store, av actorstypes.Version, networkName string) (Sta
|
||||
case actorstypes.Version11:
|
||||
return make11(store, networkName)
|
||||
|
||||
case actorstypes.Version12:
|
||||
return make12(store, networkName)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor version %d", av)
|
||||
}
|
||||
@ -167,5 +173,6 @@ func AllCodes() []cid.Cid {
|
||||
(&state9{}).Code(),
|
||||
(&state10{}).Code(),
|
||||
(&state11{}).Code(),
|
||||
(&state12{}).Code(),
|
||||
}
|
||||
}
chain/actors/builtin/init/v12.go (147 lines, generated, new file)
@ -0,0 +1,147 @@
|
||||
package init
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
builtin12 "github.com/filecoin-project/go-state-types/builtin"
|
||||
init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
|
||||
adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
|
||||
"github.com/filecoin-project/go-state-types/manifest"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
)
|
||||
|
||||
var _ State = (*state12)(nil)
|
||||
|
||||
func load12(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state12{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make12(store adt.Store, networkName string) (State, error) {
|
||||
out := state12{store: store}
|
||||
|
||||
s, err := init12.ConstructState(store, networkName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out.State = *s
|
||||
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state12 struct {
|
||||
init12.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state12) ResolveAddress(address address.Address) (address.Address, bool, error) {
|
||||
return s.State.ResolveAddress(s.store, address)
|
||||
}
|
||||
|
||||
func (s *state12) MapAddressToNewID(address address.Address) (address.Address, error) {
|
||||
return s.State.MapAddressToNewID(s.store, address)
|
||||
}
|
||||
|
||||
func (s *state12) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
|
||||
addrs, err := adt12.AsMap(s.store, s.State.AddressMap, builtin12.DefaultHamtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var actorID cbg.CborInt
|
||||
return addrs.ForEach(&actorID, func(key string) error {
|
||||
addr, err := address.NewFromBytes([]byte(key))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cb(abi.ActorID(actorID), addr)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *state12) NetworkName() (dtypes.NetworkName, error) {
|
||||
return dtypes.NetworkName(s.State.NetworkName), nil
|
||||
}
|
||||
|
||||
func (s *state12) SetNetworkName(name string) error {
|
||||
s.State.NetworkName = name
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state12) SetNextID(id abi.ActorID) error {
|
||||
s.State.NextID = id
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state12) Remove(addrs ...address.Address) (err error) {
|
||||
m, err := adt12.AsMap(s.store, s.State.AddressMap, builtin12.DefaultHamtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, addr := range addrs {
|
||||
if err = m.Delete(abi.AddrKey(addr)); err != nil {
|
||||
return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
|
||||
}
|
||||
}
|
||||
amr, err := m.Root()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get address map root: %w", err)
|
||||
}
|
||||
s.State.AddressMap = amr
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state12) SetAddressMap(mcid cid.Cid) error {
|
||||
s.State.AddressMap = mcid
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state12) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state12) AddressMap() (adt.Map, error) {
|
||||
return adt12.AsMap(s.store, s.State.AddressMap, builtin12.DefaultHamtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state12) AddressMapBitWidth() int {
|
||||
return builtin12.DefaultHamtBitwidth
|
||||
}
|
||||
|
||||
func (s *state12) AddressMapHashFunction() func(input []byte) []byte {
|
||||
return func(input []byte) []byte {
|
||||
res := sha256.Sum256(input)
|
||||
return res[:]
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state12) ActorKey() string {
|
||||
return manifest.InitKey
|
||||
}
|
||||
|
||||
func (s *state12) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version12
|
||||
}
|
||||
|
||||
func (s *state12) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
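Note (illustrative, not part of this commit): the v12 init wrapper above walks the adt12-backed address map in ForEachActor. A minimal sketch of enumerating address-to-ID mappings through the version-dispatching Load helper (the package is aliased as _init because a package named init cannot be referenced directly in Go), assuming store and actor values come from existing chain-state plumbing:

package example

import (
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	_init "github.com/filecoin-project/lotus/chain/actors/builtin/init"
	"github.com/filecoin-project/lotus/chain/types"
)

// listIDAddresses prints every robust address -> actor ID mapping held by the
// init actor's address map.
func listIDAddresses(store adt.Store, act *types.Actor) error {
	st, err := _init.Load(store, act)
	if err != nil {
		return err
	}
	return st.ForEachActor(func(id abi.ActorID, addr address.Address) error {
		fmt.Printf("%d <- %s\n", id, addr)
		return nil
	})
}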
@ -55,6 +55,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
case actorstypes.Version11:
|
||||
return load11(store, act.Head)
|
||||
|
||||
case actorstypes.Version12:
|
||||
return load12(store, act.Head)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@ -122,6 +125,9 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) {
|
||||
case actorstypes.Version11:
|
||||
return make11(store)
|
||||
|
||||
case actorstypes.Version12:
|
||||
return make12(store)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor version %d", av)
|
||||
}
|
||||
@ -217,6 +223,9 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora
|
||||
case actorstypes.Version11:
|
||||
return decodePublishStorageDealsReturn11(b)
|
||||
|
||||
case actorstypes.Version12:
|
||||
return decodePublishStorageDealsReturn12(b)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor version %d", av)
|
||||
}
|
||||
@ -303,5 +312,6 @@ func AllCodes() []cid.Cid {
|
||||
(&state9{}).Code(),
|
||||
(&state10{}).Code(),
|
||||
(&state11{}).Code(),
|
||||
(&state12{}).Code(),
|
||||
}
|
||||
}
chain/actors/builtin/market/v12.go (377 lines, generated, new file)
@ -0,0 +1,377 @@
|
||||
package market
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
"github.com/filecoin-project/go-state-types/builtin"
|
||||
market12 "github.com/filecoin-project/go-state-types/builtin/v12/market"
|
||||
adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
|
||||
markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"
|
||||
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
|
||||
"github.com/filecoin-project/go-state-types/manifest"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
var _ State = (*state12)(nil)
|
||||
|
||||
func load12(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state12{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make12(store adt.Store) (State, error) {
|
||||
out := state12{store: store}
|
||||
|
||||
s, err := market12.ConstructState(store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out.State = *s
|
||||
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state12 struct {
|
||||
market12.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state12) TotalLocked() (abi.TokenAmount, error) {
|
||||
fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
|
||||
fml = types.BigAdd(fml, s.TotalClientStorageFee)
|
||||
return fml, nil
|
||||
}
|
||||
|
||||
func (s *state12) BalancesChanged(otherState State) (bool, error) {
|
||||
otherState12, ok := otherState.(*state12)
|
||||
if !ok {
|
||||
// there's no way to compare different versions of the state, so let's
|
||||
// just say that means the state of balances has changed
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.EscrowTable.Equals(otherState12.State.EscrowTable) || !s.State.LockedTable.Equals(otherState12.State.LockedTable), nil
|
||||
}
|
||||
|
||||
func (s *state12) StatesChanged(otherState State) (bool, error) {
|
||||
otherState12, ok := otherState.(*state12)
|
||||
if !ok {
|
||||
// there's no way to compare different versions of the state, so let's
|
||||
// just say that means the state of balances has changed
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.States.Equals(otherState12.State.States), nil
|
||||
}
|
||||
|
||||
func (s *state12) States() (DealStates, error) {
|
||||
stateArray, err := adt12.AsArray(s.store, s.State.States, market12.StatesAmtBitwidth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &dealStates12{stateArray}, nil
|
||||
}
|
||||
|
||||
func (s *state12) ProposalsChanged(otherState State) (bool, error) {
|
||||
otherState12, ok := otherState.(*state12)
|
||||
if !ok {
|
||||
// there's no way to compare different versions of the state, so let's
|
||||
// just say that means the state of balances has changed
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.Proposals.Equals(otherState12.State.Proposals), nil
|
||||
}
|
||||
|
||||
func (s *state12) Proposals() (DealProposals, error) {
|
||||
proposalArray, err := adt12.AsArray(s.store, s.State.Proposals, market12.ProposalsAmtBitwidth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &dealProposals12{proposalArray}, nil
|
||||
}
|
||||
|
||||
func (s *state12) EscrowTable() (BalanceTable, error) {
|
||||
bt, err := adt12.AsBalanceTable(s.store, s.State.EscrowTable)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &balanceTable12{bt}, nil
|
||||
}
|
||||
|
||||
func (s *state12) LockedTable() (BalanceTable, error) {
|
||||
bt, err := adt12.AsBalanceTable(s.store, s.State.LockedTable)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &balanceTable12{bt}, nil
|
||||
}
|
||||
|
||||
func (s *state12) VerifyDealsForActivation(
|
||||
minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
|
||||
) (weight, verifiedWeight abi.DealWeight, err error) {
|
||||
w, vw, _, err := market12.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
|
||||
return w, vw, err
|
||||
}
|
||||
|
||||
func (s *state12) NextID() (abi.DealID, error) {
|
||||
return s.State.NextID, nil
|
||||
}
|
||||
|
||||
type balanceTable12 struct {
|
||||
*adt12.BalanceTable
|
||||
}
|
||||
|
||||
func (bt *balanceTable12) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
|
||||
asMap := (*adt12.Map)(bt.BalanceTable)
|
||||
var ta abi.TokenAmount
|
||||
return asMap.ForEach(&ta, func(key string) error {
|
||||
a, err := address.NewFromBytes([]byte(key))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cb(a, ta)
|
||||
})
|
||||
}
|
||||
|
||||
type dealStates12 struct {
|
||||
adt.Array
|
||||
}
|
||||
|
||||
func (s *dealStates12) Get(dealID abi.DealID) (*DealState, bool, error) {
|
||||
var deal12 market12.DealState
|
||||
found, err := s.Array.Get(uint64(dealID), &deal12)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !found {
|
||||
return nil, false, nil
|
||||
}
|
||||
deal := fromV12DealState(deal12)
|
||||
return &deal, true, nil
|
||||
}
|
||||
|
||||
func (s *dealStates12) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
|
||||
var ds12 market12.DealState
|
||||
return s.Array.ForEach(&ds12, func(idx int64) error {
|
||||
return cb(abi.DealID(idx), fromV12DealState(ds12))
|
||||
})
|
||||
}
|
||||
|
||||
func (s *dealStates12) decode(val *cbg.Deferred) (*DealState, error) {
|
||||
var ds12 market12.DealState
|
||||
if err := ds12.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ds := fromV12DealState(ds12)
|
||||
return &ds, nil
|
||||
}
|
||||
|
||||
func (s *dealStates12) array() adt.Array {
|
||||
return s.Array
|
||||
}
|
||||
|
||||
func fromV12DealState(v12 market12.DealState) DealState {
|
||||
ret := DealState{
|
||||
SectorStartEpoch: v12.SectorStartEpoch,
|
||||
LastUpdatedEpoch: v12.LastUpdatedEpoch,
|
||||
SlashEpoch: v12.SlashEpoch,
|
||||
VerifiedClaim: 0,
|
||||
}
|
||||
|
||||
ret.VerifiedClaim = verifregtypes.AllocationId(v12.VerifiedClaim)
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
type dealProposals12 struct {
|
||||
adt.Array
|
||||
}
|
||||
|
||||
func (s *dealProposals12) Get(dealID abi.DealID) (*DealProposal, bool, error) {
|
||||
var proposal12 market12.DealProposal
|
||||
found, err := s.Array.Get(uint64(dealID), &proposal12)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if !found {
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
proposal, err := fromV12DealProposal(proposal12)
|
||||
if err != nil {
|
||||
return nil, true, xerrors.Errorf("decoding proposal: %w", err)
|
||||
}
|
||||
|
||||
return &proposal, true, nil
|
||||
}
|
||||
|
||||
func (s *dealProposals12) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
|
||||
var dp12 market12.DealProposal
|
||||
return s.Array.ForEach(&dp12, func(idx int64) error {
|
||||
dp, err := fromV12DealProposal(dp12)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("decoding proposal: %w", err)
|
||||
}
|
||||
|
||||
return cb(abi.DealID(idx), dp)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *dealProposals12) decode(val *cbg.Deferred) (*DealProposal, error) {
|
||||
var dp12 market12.DealProposal
|
||||
if err := dp12.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dp, err := fromV12DealProposal(dp12)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &dp, nil
|
||||
}
|
||||
|
||||
func (s *dealProposals12) array() adt.Array {
|
||||
return s.Array
|
||||
}
|
||||
|
||||
func fromV12DealProposal(v12 market12.DealProposal) (DealProposal, error) {
|
||||
|
||||
label, err := fromV12Label(v12.Label)
|
||||
|
||||
if err != nil {
|
||||
return DealProposal{}, xerrors.Errorf("error setting deal label: %w", err)
|
||||
}
|
||||
|
||||
return DealProposal{
|
||||
PieceCID: v12.PieceCID,
|
||||
PieceSize: v12.PieceSize,
|
||||
VerifiedDeal: v12.VerifiedDeal,
|
||||
Client: v12.Client,
|
||||
Provider: v12.Provider,
|
||||
|
||||
Label: label,
|
||||
|
||||
StartEpoch: v12.StartEpoch,
|
||||
EndEpoch: v12.EndEpoch,
|
||||
StoragePricePerEpoch: v12.StoragePricePerEpoch,
|
||||
|
||||
ProviderCollateral: v12.ProviderCollateral,
|
||||
ClientCollateral: v12.ClientCollateral,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func fromV12Label(v12 market12.DealLabel) (DealLabel, error) {
|
||||
if v12.IsString() {
|
||||
str, err := v12.ToString()
|
||||
if err != nil {
|
||||
return markettypes.EmptyDealLabel, xerrors.Errorf("failed to convert string label to string: %w", err)
|
||||
}
|
||||
return markettypes.NewLabelFromString(str)
|
||||
}
|
||||
|
||||
bs, err := v12.ToBytes()
|
||||
if err != nil {
|
||||
return markettypes.EmptyDealLabel, xerrors.Errorf("failed to convert bytes label to bytes: %w", err)
|
||||
}
|
||||
return markettypes.NewLabelFromBytes(bs)
|
||||
}
|
||||
|
||||
func (s *state12) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
var _ PublishStorageDealsReturn = (*publishStorageDealsReturn12)(nil)
|
||||
|
||||
func decodePublishStorageDealsReturn12(b []byte) (PublishStorageDealsReturn, error) {
|
||||
var retval market12.PublishStorageDealsReturn
|
||||
if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
|
||||
return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err)
|
||||
}
|
||||
|
||||
return &publishStorageDealsReturn12{retval}, nil
|
||||
}
|
||||
|
||||
type publishStorageDealsReturn12 struct {
|
||||
market12.PublishStorageDealsReturn
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn12) IsDealValid(index uint64) (bool, int, error) {
|
||||
|
||||
set, err := r.ValidDeals.IsSet(index)
|
||||
if err != nil || !set {
|
||||
return false, -1, err
|
||||
}
|
||||
maskBf, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{
|
||||
Runs: []rlepluslazy.Run{rlepluslazy.Run{Val: true, Len: index}}})
|
||||
if err != nil {
|
||||
return false, -1, err
|
||||
}
|
||||
before, err := bitfield.IntersectBitField(maskBf, r.ValidDeals)
|
||||
if err != nil {
|
||||
return false, -1, err
|
||||
}
|
||||
outIdx, err := before.Count()
|
||||
if err != nil {
|
||||
return false, -1, err
|
||||
}
|
||||
return set, int(outIdx), nil
|
||||
|
||||
}
|
||||
|
||||
func (r *publishStorageDealsReturn12) DealIDs() ([]abi.DealID, error) {
|
||||
return r.IDs, nil
|
||||
}
|
||||
|
||||
func (s *state12) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) {
|
||||
|
||||
allocations, err := adt12.AsMap(s.store, s.PendingDealAllocationIds, builtin.DefaultHamtBitwidth)
|
||||
if err != nil {
|
||||
return verifregtypes.NoAllocationID, xerrors.Errorf("failed to load allocation id for %d: %w", dealId, err)
|
||||
}
|
||||
|
||||
var allocationId cbg.CborInt
|
||||
found, err := allocations.Get(abi.UIntKey(uint64(dealId)), &allocationId)
|
||||
if err != nil {
|
||||
return verifregtypes.NoAllocationID, xerrors.Errorf("failed to load allocation id for %d: %w", dealId, err)
|
||||
}
|
||||
if !found {
|
||||
return verifregtypes.NoAllocationID, nil
|
||||
}
|
||||
|
||||
return verifregtypes.AllocationId(allocationId), nil
|
||||
|
||||
}
|
||||
|
||||
func (s *state12) ActorKey() string {
|
||||
return manifest.MarketKey
|
||||
}
|
||||
|
||||
func (s *state12) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version12
|
||||
}
|
||||
|
||||
func (s *state12) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
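Note (illustrative, not part of this commit): the v12 market wrapper above converts each market12.DealState into the common DealState struct via fromV12DealState. A minimal sketch of iterating deal states through the version-dispatching market.Load helper, assuming store and actor values come from existing chain-state plumbing:

package example

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
	"github.com/filecoin-project/lotus/chain/types"
)

// listActiveDeals prints every deal that has been activated in a sector
// (SectorStartEpoch is -1 until activation).
func listActiveDeals(store adt.Store, act *types.Actor) error {
	st, err := market.Load(store, act)
	if err != nil {
		return err
	}
	states, err := st.States()
	if err != nil {
		return err
	}
	return states.ForEach(func(id abi.DealID, ds market.DealState) error {
		if ds.SectorStartEpoch != -1 {
			fmt.Printf("deal %d active since epoch %d\n", id, ds.SectorStartEpoch)
		}
		return nil
	})
}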
@ -48,6 +48,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
case actorstypes.Version11:
|
||||
return load11(store, act.Head)
|
||||
|
||||
case actorstypes.Version12:
|
||||
return load12(store, act.Head)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@ -115,6 +118,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
|
||||
case actors.Version11:
|
||||
return make11(store)
|
||||
|
||||
case actors.Version12:
|
||||
return make12(store)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor version %d", av)
|
||||
}
|
||||
@ -321,5 +327,6 @@ func AllCodes() []cid.Cid {
|
||||
(&state9{}).Code(),
|
||||
(&state10{}).Code(),
|
||||
(&state11{}).Code(),
|
||||
(&state12{}).Code(),
|
||||
}
|
||||
}
chain/actors/builtin/miner/v12.go (591 lines, generated, new file)
@ -0,0 +1,591 @@
|
||||
package miner
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
rle "github.com/filecoin-project/go-bitfield/rle"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
builtin12 "github.com/filecoin-project/go-state-types/builtin"
|
||||
miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner"
|
||||
adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
"github.com/filecoin-project/go-state-types/manifest"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state12)(nil)
|
||||
|
||||
func load12(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state12{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make12(store adt.Store) (State, error) {
|
||||
out := state12{store: store}
|
||||
out.State = miner12.State{}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state12 struct {
|
||||
miner12.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
type deadline12 struct {
|
||||
miner12.Deadline
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
type partition12 struct {
|
||||
miner12.Partition
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state12) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = xerrors.Errorf("failed to get available balance: %w", r)
|
||||
available = abi.NewTokenAmount(0)
|
||||
}
|
||||
}()
|
||||
// this panics if the miner doesnt have enough funds to cover their locked pledge
|
||||
available, err = s.GetAvailableBalance(bal)
|
||||
return available, err
|
||||
}
|
||||
|
||||
func (s *state12) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
|
||||
return s.CheckVestedFunds(s.store, epoch)
|
||||
}
|
||||
|
||||
func (s *state12) LockedFunds() (LockedFunds, error) {
|
||||
return LockedFunds{
|
||||
VestingFunds: s.State.LockedFunds,
|
||||
InitialPledgeRequirement: s.State.InitialPledge,
|
||||
PreCommitDeposits: s.State.PreCommitDeposits,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *state12) FeeDebt() (abi.TokenAmount, error) {
|
||||
return s.State.FeeDebt, nil
|
||||
}
|
||||
|
||||
func (s *state12) InitialPledge() (abi.TokenAmount, error) {
|
||||
return s.State.InitialPledge, nil
|
||||
}
|
||||
|
||||
func (s *state12) PreCommitDeposits() (abi.TokenAmount, error) {
|
||||
return s.State.PreCommitDeposits, nil
|
||||
}
|
||||
|
||||
// Returns nil, nil if sector is not found
|
||||
func (s *state12) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
|
||||
info, ok, err := s.State.GetSector(s.store, num)
|
||||
if !ok || err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret := fromV12SectorOnChainInfo(*info)
|
||||
return &ret, nil
|
||||
}
|
||||
|
||||
func (s *state12) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
|
||||
dlIdx, partIdx, err := s.State.FindSector(s.store, num)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &SectorLocation{
|
||||
Deadline: dlIdx,
|
||||
Partition: partIdx,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *state12) NumLiveSectors() (uint64, error) {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
var total uint64
|
||||
if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner12.Deadline) error {
|
||||
total += dl.LiveSectors
|
||||
return nil
|
||||
}); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return total, nil
|
||||
}
|
||||
|
||||
// GetSectorExpiration returns the effective expiration of the given sector.
|
||||
//
|
||||
// If the sector does not expire early, the Early expiration field is 0.
|
||||
func (s *state12) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// NOTE: this can be optimized significantly.
|
||||
// 1. If the sector is non-faulty, it will expire on-time (can be
|
||||
// learned from the sector info).
|
||||
// 2. If it's faulty, it will expire early within the first 42 entries
|
||||
// of the expiration queue.
|
||||
|
||||
stopErr := errors.New("stop")
|
||||
out := SectorExpiration{}
|
||||
err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner12.Deadline) error {
|
||||
partitions, err := dl.PartitionsArray(s.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
quant := s.State.QuantSpecForDeadline(dlIdx)
|
||||
var part miner12.Partition
|
||||
return partitions.ForEach(&part, func(partIdx int64) error {
|
||||
if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if !found {
|
||||
return nil
|
||||
}
|
||||
if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if found {
|
||||
// already terminated
|
||||
return stopErr
|
||||
}
|
||||
|
||||
q, err := miner12.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner12.PartitionExpirationAmtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var exp miner12.ExpirationSet
|
||||
return q.ForEach(&exp, func(epoch int64) error {
|
||||
if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if early {
|
||||
out.Early = abi.ChainEpoch(epoch)
|
||||
return nil
|
||||
}
|
||||
if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
|
||||
return err
|
||||
} else if onTime {
|
||||
out.OnTime = abi.ChainEpoch(epoch)
|
||||
return stopErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
})
|
||||
})
|
||||
if err == stopErr {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if out.Early == 0 && out.OnTime == 0 {
|
||||
return nil, xerrors.Errorf("failed to find sector %d", num)
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func (s *state12) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
|
||||
info, ok, err := s.State.GetPrecommittedSector(s.store, num)
|
||||
if !ok || err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret := fromV12SectorPreCommitOnChainInfo(*info)
|
||||
|
||||
return &ret, nil
|
||||
}
|
||||
|
||||
func (s *state12) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
|
||||
precommitted, err := adt12.AsMap(s.store, s.State.PreCommittedSectors, builtin12.DefaultHamtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var info miner12.SectorPreCommitOnChainInfo
|
||||
if err := precommitted.ForEach(&info, func(_ string) error {
|
||||
return cb(fromV12SectorPreCommitOnChainInfo(info))
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state12) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
|
||||
sectors, err := miner12.LoadSectors(s.store, s.State.Sectors)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If no sector numbers are specified, load all.
|
||||
if snos == nil {
|
||||
infos := make([]*SectorOnChainInfo, 0, sectors.Length())
|
||||
var info12 miner12.SectorOnChainInfo
|
||||
if err := sectors.ForEach(&info12, func(_ int64) error {
|
||||
info := fromV12SectorOnChainInfo(info12)
|
||||
infos = append(infos, &info)
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return infos, nil
|
||||
}
|
||||
|
||||
// Otherwise, load selected.
|
||||
infos12, err := sectors.Load(*snos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
infos := make([]*SectorOnChainInfo, len(infos12))
|
||||
for i, info12 := range infos12 {
|
||||
info := fromV12SectorOnChainInfo(*info12)
|
||||
infos[i] = &info
|
||||
}
|
||||
return infos, nil
|
||||
}
|
||||
|
||||
func (s *state12) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
|
||||
var allocatedSectors bitfield.BitField
|
||||
err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
|
||||
return allocatedSectors, err
|
||||
}
|
||||
|
||||
func (s *state12) IsAllocated(num abi.SectorNumber) (bool, error) {
|
||||
allocatedSectors, err := s.loadAllocatedSectorNumbers()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return allocatedSectors.IsSet(uint64(num))
|
||||
}
|
||||
|
||||
func (s *state12) GetProvingPeriodStart() (abi.ChainEpoch, error) {
|
||||
return s.State.ProvingPeriodStart, nil
|
||||
}
|
||||
|
||||
func (s *state12) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
|
||||
allocatedSectors, err := s.loadAllocatedSectorNumbers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
allocatedRuns, err := allocatedSectors.RunIterator()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
unallocatedRuns, err := rle.Subtract(
|
||||
&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
|
||||
allocatedRuns,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
iter, err := rle.BitsFromRuns(unallocatedRuns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sectors := make([]abi.SectorNumber, 0, count)
|
||||
for iter.HasNext() && len(sectors) < count {
|
||||
nextNo, err := iter.Next()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sectors = append(sectors, abi.SectorNumber(nextNo))
|
||||
}
|
||||
|
||||
return sectors, nil
|
||||
}
|
||||
|
||||
func (s *state12) GetAllocatedSectors() (*bitfield.BitField, error) {
|
||||
var allocatedSectors bitfield.BitField
|
||||
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &allocatedSectors, nil
|
||||
}
|
||||
|
||||
func (s *state12) LoadDeadline(idx uint64) (Deadline, error) {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dl, err := dls.LoadDeadline(s.store, idx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &deadline12{*dl, s.store}, nil
|
||||
}
|
||||
|
||||
func (s *state12) ForEachDeadline(cb func(uint64, Deadline) error) error {
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dls.ForEach(s.store, func(i uint64, dl *miner12.Deadline) error {
|
||||
return cb(i, &deadline12{*dl, s.store})
|
||||
})
|
||||
}
|
||||
|
||||
func (s *state12) NumDeadlines() (uint64, error) {
|
||||
return miner12.WPoStPeriodDeadlines, nil
|
||||
}
|
||||
|
||||
func (s *state12) DeadlinesChanged(other State) (bool, error) {
|
||||
other12, ok := other.(*state12)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return !s.State.Deadlines.Equals(other12.Deadlines), nil
|
||||
}
|
||||
|
||||
func (s *state12) MinerInfoChanged(other State) (bool, error) {
|
||||
other0, ok := other.(*state12)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.Info.Equals(other0.State.Info), nil
|
||||
}
|
||||
|
||||
func (s *state12) Info() (MinerInfo, error) {
|
||||
info, err := s.State.GetInfo(s.store)
|
||||
if err != nil {
|
||||
return MinerInfo{}, err
|
||||
}
|
||||
|
||||
mi := MinerInfo{
|
||||
Owner: info.Owner,
|
||||
Worker: info.Worker,
|
||||
ControlAddresses: info.ControlAddresses,
|
||||
|
||||
PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey),
|
||||
|
||||
PeerId: info.PeerId,
|
||||
Multiaddrs: info.Multiaddrs,
|
||||
WindowPoStProofType: info.WindowPoStProofType,
|
||||
SectorSize: info.SectorSize,
|
||||
WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
|
||||
ConsensusFaultElapsed: info.ConsensusFaultElapsed,
|
||||
|
||||
Beneficiary: info.Beneficiary,
|
||||
BeneficiaryTerm: BeneficiaryTerm(info.BeneficiaryTerm),
|
||||
PendingBeneficiaryTerm: (*PendingBeneficiaryChange)(info.PendingBeneficiaryTerm),
|
||||
}
|
||||
|
||||
return mi, nil
|
||||
}
|
||||
|
||||
func (s *state12) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
|
||||
return s.State.RecordedDeadlineInfo(epoch), nil
|
||||
}
|
||||
|
||||
func (s *state12) DeadlineCronActive() (bool, error) {
|
||||
return s.State.DeadlineCronActive, nil
|
||||
}
|
||||
|
||||
func (s *state12) sectors() (adt.Array, error) {
|
||||
return adt12.AsArray(s.store, s.Sectors, miner12.SectorsAmtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state12) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
|
||||
var si miner12.SectorOnChainInfo
|
||||
err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
|
||||
if err != nil {
|
||||
return SectorOnChainInfo{}, err
|
||||
}
|
||||
|
||||
return fromV12SectorOnChainInfo(si), nil
|
||||
}
|
||||
|
||||
func (s *state12) precommits() (adt.Map, error) {
|
||||
return adt12.AsMap(s.store, s.PreCommittedSectors, builtin12.DefaultHamtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state12) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
|
||||
var sp miner12.SectorPreCommitOnChainInfo
|
||||
err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
|
||||
if err != nil {
|
||||
return SectorPreCommitOnChainInfo{}, err
|
||||
}
|
||||
|
||||
return fromV12SectorPreCommitOnChainInfo(sp), nil
|
||||
}
|
||||
|
||||
func (s *state12) EraseAllUnproven() error {
|
||||
|
||||
dls, err := s.State.LoadDeadlines(s.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dls.ForEach(s.store, func(dindx uint64, dl *miner12.Deadline) error {
|
||||
ps, err := dl.PartitionsArray(s.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var part miner12.Partition
|
||||
err = ps.ForEach(&part, func(pindx int64) error {
|
||||
_ = part.ActivateUnproven()
|
||||
err = ps.Set(uint64(pindx), &part)
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dl.Partitions, err = ps.Root()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return dls.UpdateDeadline(s.store, dindx, dl)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.State.SaveDeadlines(s.store, dls)
|
||||
|
||||
}
|
||||
|
||||
func (d *deadline12) LoadPartition(idx uint64) (Partition, error) {
|
||||
p, err := d.Deadline.LoadPartition(d.store, idx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &partition12{*p, d.store}, nil
|
||||
}
|
||||
|
||||
func (d *deadline12) ForEachPartition(cb func(uint64, Partition) error) error {
|
||||
ps, err := d.Deadline.PartitionsArray(d.store)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var part miner12.Partition
|
||||
return ps.ForEach(&part, func(i int64) error {
|
||||
return cb(uint64(i), &partition12{part, d.store})
|
||||
})
|
||||
}
|
||||
|
||||
func (d *deadline12) PartitionsChanged(other Deadline) (bool, error) {
|
||||
other12, ok := other.(*deadline12)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return !d.Deadline.Partitions.Equals(other12.Deadline.Partitions), nil
|
||||
}
|
||||
|
||||
func (d *deadline12) PartitionsPoSted() (bitfield.BitField, error) {
|
||||
return d.Deadline.PartitionsPoSted, nil
|
||||
}
|
||||
|
||||
func (d *deadline12) DisputableProofCount() (uint64, error) {
|
||||
|
||||
ops, err := d.OptimisticProofsSnapshotArray(d.store)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return ops.Length(), nil
|
||||
|
||||
}
|
||||
|
||||
func (p *partition12) AllSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Sectors, nil
|
||||
}
|
||||
|
||||
func (p *partition12) FaultySectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Faults, nil
|
||||
}
|
||||
|
||||
func (p *partition12) RecoveringSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Recoveries, nil
|
||||
}
|
||||
|
||||
func (p *partition12) UnprovenSectors() (bitfield.BitField, error) {
|
||||
return p.Partition.Unproven, nil
|
||||
}
|
||||
|
||||
func fromV12SectorOnChainInfo(v12 miner12.SectorOnChainInfo) SectorOnChainInfo {
|
||||
info := SectorOnChainInfo{
|
||||
SectorNumber: v12.SectorNumber,
|
||||
SealProof: v12.SealProof,
|
||||
SealedCID: v12.SealedCID,
|
||||
DealIDs: v12.DealIDs,
|
||||
Activation: v12.Activation,
|
||||
Expiration: v12.Expiration,
|
||||
DealWeight: v12.DealWeight,
|
||||
VerifiedDealWeight: v12.VerifiedDealWeight,
|
||||
InitialPledge: v12.InitialPledge,
|
||||
ExpectedDayReward: v12.ExpectedDayReward,
|
||||
ExpectedStoragePledge: v12.ExpectedStoragePledge,
|
||||
|
||||
SectorKeyCID: v12.SectorKeyCID,
|
||||
}
|
||||
return info
|
||||
}
|
||||
|
||||
func fromV12SectorPreCommitOnChainInfo(v12 miner12.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
|
||||
ret := SectorPreCommitOnChainInfo{
|
||||
Info: SectorPreCommitInfo{
|
||||
SealProof: v12.Info.SealProof,
|
||||
SectorNumber: v12.Info.SectorNumber,
|
||||
SealedCID: v12.Info.SealedCID,
|
||||
SealRandEpoch: v12.Info.SealRandEpoch,
|
||||
DealIDs: v12.Info.DealIDs,
|
||||
Expiration: v12.Info.Expiration,
|
||||
UnsealedCid: nil,
|
||||
},
|
||||
PreCommitDeposit: v12.PreCommitDeposit,
|
||||
PreCommitEpoch: v12.PreCommitEpoch,
|
||||
}
|
||||
|
||||
ret.Info.UnsealedCid = v12.Info.UnsealedCid
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (s *state12) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state12) ActorKey() string {
|
||||
return manifest.MinerKey
|
||||
}
|
||||
|
||||
func (s *state12) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version12
|
||||
}
|
||||
|
||||
func (s *state12) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
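Aside (illustrative, not part of the commit): the new state12 above only has to satisfy the existing miner State interface, so consumers keep using the same accessors regardless of actors version. A small Go sketch under that assumption; the package and helper names are placeholders.

package example

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

// describeSector is a hypothetical helper exercising FindSector, GetSector
// and NumLiveSectors, all implemented by state12 above.
func describeSector(st miner.State, num abi.SectorNumber) error {
	loc, err := st.FindSector(num)
	if err != nil {
		return err
	}
	info, err := st.GetSector(num)
	if err != nil {
		return err
	}
	if info == nil {
		return fmt.Errorf("sector %d not found", num)
	}
	live, err := st.NumLiveSectors()
	if err != nil {
		return err
	}
	fmt.Printf("sector %d: deadline %d, partition %d, expires at epoch %d (%d live sectors)\n",
		num, loc.Deadline, loc.Partition, info.Expiration, live)
	return nil
}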
4
chain/actors/builtin/multisig/message10.go
generated
@ -8,7 +8,7 @@ import (
	actorstypes "github.com/filecoin-project/go-state-types/actors"
	builtintypes "github.com/filecoin-project/go-state-types/builtin"
	multisig10 "github.com/filecoin-project/go-state-types/builtin/v10/multisig"
	init11 "github.com/filecoin-project/go-state-types/builtin/v11/init"
	init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
	"github.com/filecoin-project/go-state-types/manifest"

	"github.com/filecoin-project/lotus/chain/actors"
@ -57,7 +57,7 @@ func (m message10) Create(
	}

	// new actors are created by invoking 'exec' on the init actor with the constructor params
	execParams := &init11.ExecParams{
	execParams := &init12.ExecParams{
		CodeCID:           code,
		ConstructorParams: enc,
	}
4
chain/actors/builtin/multisig/message11.go
generated
@ -7,8 +7,8 @@ import (
	"github.com/filecoin-project/go-state-types/abi"
	actorstypes "github.com/filecoin-project/go-state-types/actors"
	builtintypes "github.com/filecoin-project/go-state-types/builtin"
	init11 "github.com/filecoin-project/go-state-types/builtin/v11/init"
	multisig11 "github.com/filecoin-project/go-state-types/builtin/v11/multisig"
	init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
	"github.com/filecoin-project/go-state-types/manifest"

	"github.com/filecoin-project/lotus/chain/actors"
@ -57,7 +57,7 @@ func (m message11) Create(
	}

	// new actors are created by invoking 'exec' on the init actor with the constructor params
	execParams := &init11.ExecParams{
	execParams := &init12.ExecParams{
		CodeCID:           code,
		ConstructorParams: enc,
	}
77
chain/actors/builtin/multisig/message12.go
generated
Normal file
@ -0,0 +1,77 @@
package multisig

import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	actorstypes "github.com/filecoin-project/go-state-types/actors"
	builtintypes "github.com/filecoin-project/go-state-types/builtin"
	init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
	multisig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig"
	"github.com/filecoin-project/go-state-types/manifest"

	"github.com/filecoin-project/lotus/chain/actors"
	init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
	"github.com/filecoin-project/lotus/chain/types"
)

type message12 struct{ message0 }

func (m message12) Create(
	signers []address.Address, threshold uint64,
	unlockStart, unlockDuration abi.ChainEpoch,
	initialAmount abi.TokenAmount,
) (*types.Message, error) {

	lenAddrs := uint64(len(signers))

	if lenAddrs < threshold {
		return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
	}

	if threshold == 0 {
		threshold = lenAddrs
	}

	if m.from == address.Undef {
		return nil, xerrors.Errorf("must provide source address")
	}

	// Set up constructor parameters for multisig
	msigParams := &multisig12.ConstructorParams{
		Signers:               signers,
		NumApprovalsThreshold: threshold,
		UnlockDuration:        unlockDuration,
		StartEpoch:            unlockStart,
	}

	enc, actErr := actors.SerializeParams(msigParams)
	if actErr != nil {
		return nil, actErr
	}

	code, ok := actors.GetActorCodeID(actorstypes.Version12, manifest.MultisigKey)
	if !ok {
		return nil, xerrors.Errorf("failed to get multisig code ID")
	}

	// new actors are created by invoking 'exec' on the init actor with the constructor params
	execParams := &init12.ExecParams{
		CodeCID:           code,
		ConstructorParams: enc,
	}

	enc, actErr = actors.SerializeParams(execParams)
	if actErr != nil {
		return nil, actErr
	}

	return &types.Message{
		To:     init_.Address,
		From:   m.from,
		Method: builtintypes.MethodsInit.Exec,
		Params: enc,
		Value:  initialAmount,
	}, nil
}
4
chain/actors/builtin/multisig/message8.go
generated
@ -7,7 +7,7 @@ import (
	"github.com/filecoin-project/go-state-types/abi"
	actorstypes "github.com/filecoin-project/go-state-types/actors"
	builtintypes "github.com/filecoin-project/go-state-types/builtin"
	init11 "github.com/filecoin-project/go-state-types/builtin/v11/init"
	init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
	multisig8 "github.com/filecoin-project/go-state-types/builtin/v8/multisig"
	"github.com/filecoin-project/go-state-types/manifest"

@ -57,7 +57,7 @@ func (m message8) Create(
	}

	// new actors are created by invoking 'exec' on the init actor with the constructor params
	execParams := &init11.ExecParams{
	execParams := &init12.ExecParams{
		CodeCID:           code,
		ConstructorParams: enc,
	}
4
chain/actors/builtin/multisig/message9.go
generated
@ -7,7 +7,7 @@ import (
	"github.com/filecoin-project/go-state-types/abi"
	actorstypes "github.com/filecoin-project/go-state-types/actors"
	builtintypes "github.com/filecoin-project/go-state-types/builtin"
	init11 "github.com/filecoin-project/go-state-types/builtin/v11/init"
	init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
	multisig9 "github.com/filecoin-project/go-state-types/builtin/v9/multisig"
	"github.com/filecoin-project/go-state-types/manifest"

@ -57,7 +57,7 @@ func (m message9) Create(
	}

	// new actors are created by invoking 'exec' on the init actor with the constructor params
	execParams := &init11.ExecParams{
	execParams := &init12.ExecParams{
		CodeCID:           code,
		ConstructorParams: enc,
	}
@ -12,7 +12,7 @@ import (
	"github.com/filecoin-project/go-state-types/abi"
	actorstypes "github.com/filecoin-project/go-state-types/actors"
	builtintypes "github.com/filecoin-project/go-state-types/builtin"
	msig11 "github.com/filecoin-project/go-state-types/builtin/v11/multisig"
	msig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig"
	"github.com/filecoin-project/go-state-types/cbor"
	"github.com/filecoin-project/go-state-types/manifest"
	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@ -48,6 +48,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
	case actorstypes.Version11:
		return load11(store, act.Head)

	case actorstypes.Version12:
		return load12(store, act.Head)

	}
}

@ -115,6 +118,9 @@ func MakeState(store adt.Store, av actorstypes.Version, signers []address.Addres
	case actorstypes.Version11:
		return make11(store, signers, threshold, startEpoch, unlockDuration, initialBalance)

	case actorstypes.Version12:
		return make12(store, signers, threshold, startEpoch, unlockDuration, initialBalance)

	}
	return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -141,7 +147,7 @@ type State interface {
	GetState() interface{}
}

type Transaction = msig11.Transaction
type Transaction = msig12.Transaction

var Methods = builtintypes.MethodsMultisig

@ -180,6 +186,9 @@ func Message(version actorstypes.Version, from address.Address) MessageBuilder {

	case actorstypes.Version11:
		return message11{message0{from}}

	case actorstypes.Version12:
		return message12{message0{from}}
	default:
		panic(fmt.Sprintf("unsupported actors version: %d", version))
	}
@ -203,13 +212,13 @@ type MessageBuilder interface {
}

// this type is the same between v0 and v2
type ProposalHashData = msig11.ProposalHashData
type ProposeReturn = msig11.ProposeReturn
type ProposeParams = msig11.ProposeParams
type ApproveReturn = msig11.ApproveReturn
type ProposalHashData = msig12.ProposalHashData
type ProposeReturn = msig12.ProposeReturn
type ProposeParams = msig12.ProposeParams
type ApproveReturn = msig12.ApproveReturn

func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
	params := msig11.TxnIDParams{ID: msig11.TxnID(id)}
	params := msig12.TxnIDParams{ID: msig12.TxnID(id)}
	if data != nil {
		if data.Requester.Protocol() != address.ID {
			return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester)
@ -244,5 +253,6 @@ func AllCodes() []cid.Cid {
		(&state9{}).Code(),
		(&state10{}).Code(),
		(&state11{}).Code(),
		(&state12{}).Code(),
	}
}
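Aside (illustrative, not part of the commit): with the Version12 case wired into Message above, building a create-multisig message against the v12 actors is the same call existing sites already make for earlier versions; Create wraps the constructor params in an init-actor Exec, as message12.Create shows. A Go sketch assuming the MessageBuilder.Create signature implemented above; the package and helper names are placeholders.

package example

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	actorstypes "github.com/filecoin-project/go-state-types/actors"

	"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
	"github.com/filecoin-project/lotus/chain/types"
)

// buildCreateMsig is a hypothetical helper: Message selects the message12
// builder for Version12, and Create returns the Exec message for the init actor.
func buildCreateMsig(from address.Address, signers []address.Address, threshold uint64, amt abi.TokenAmount) (*types.Message, error) {
	return multisig.Message(actorstypes.Version12, from).Create(signers, threshold, 0, 0, amt)
}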
138
chain/actors/builtin/multisig/v12.go
generated
Normal file
@ -0,0 +1,138 @@
|
||||
package multisig
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
builtin12 "github.com/filecoin-project/go-state-types/builtin"
|
||||
msig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig"
|
||||
adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
|
||||
"github.com/filecoin-project/go-state-types/manifest"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state12)(nil)
|
||||
|
||||
func load12(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state12{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make12(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
|
||||
out := state12{store: store}
|
||||
out.State = msig12.State{}
|
||||
out.State.Signers = signers
|
||||
out.State.NumApprovalsThreshold = threshold
|
||||
out.State.StartEpoch = startEpoch
|
||||
out.State.UnlockDuration = unlockDuration
|
||||
out.State.InitialBalance = initialBalance
|
||||
|
||||
em, err := adt12.StoreEmptyMap(store, builtin12.DefaultHamtBitwidth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out.State.PendingTxns = em
|
||||
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state12 struct {
|
||||
msig12.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state12) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) {
|
||||
return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil
|
||||
}
|
||||
|
||||
func (s *state12) StartEpoch() (abi.ChainEpoch, error) {
|
||||
return s.State.StartEpoch, nil
|
||||
}
|
||||
|
||||
func (s *state12) UnlockDuration() (abi.ChainEpoch, error) {
|
||||
return s.State.UnlockDuration, nil
|
||||
}
|
||||
|
||||
func (s *state12) InitialBalance() (abi.TokenAmount, error) {
|
||||
return s.State.InitialBalance, nil
|
||||
}
|
||||
|
||||
func (s *state12) Threshold() (uint64, error) {
|
||||
return s.State.NumApprovalsThreshold, nil
|
||||
}
|
||||
|
||||
func (s *state12) Signers() ([]address.Address, error) {
|
||||
return s.State.Signers, nil
|
||||
}
|
||||
|
||||
func (s *state12) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error {
|
||||
arr, err := adt12.AsMap(s.store, s.State.PendingTxns, builtin12.DefaultHamtBitwidth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var out msig12.Transaction
|
||||
return arr.ForEach(&out, func(key string) error {
|
||||
txid, n := binary.Varint([]byte(key))
|
||||
if n <= 0 {
|
||||
return xerrors.Errorf("invalid pending transaction key: %v", key)
|
||||
}
|
||||
return cb(txid, (Transaction)(out)) //nolint:unconvert
|
||||
})
|
||||
}
|
||||
|
||||
func (s *state12) PendingTxnChanged(other State) (bool, error) {
|
||||
other12, ok := other.(*state12)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.PendingTxns.Equals(other12.PendingTxns), nil
|
||||
}
|
||||
|
||||
func (s *state12) transactions() (adt.Map, error) {
|
||||
return adt12.AsMap(s.store, s.PendingTxns, builtin12.DefaultHamtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state12) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
|
||||
var tx msig12.Transaction
|
||||
if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
|
||||
return Transaction{}, err
|
||||
}
|
||||
return Transaction(tx), nil
|
||||
}
|
||||
|
||||
func (s *state12) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state12) ActorKey() string {
|
||||
return manifest.MultisigKey
|
||||
}
|
||||
|
||||
func (s *state12) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version12
|
||||
}
|
||||
|
||||
func (s *state12) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
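Aside (illustrative, not part of the commit): pending transactions are read through ForEachPendingTxn, with Transaction now aliased to msig12.Transaction as the multisig.go hunk above shows. A Go sketch assuming the usual go-state-types Transaction fields (To, Value, Approved); the package and helper names are placeholders.

package example

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
)

// listPending is a hypothetical helper that walks the PendingTxns HAMT via
// the State interface implemented by state12 above.
func listPending(st multisig.State) error {
	return st.ForEachPendingTxn(func(id int64, txn multisig.Transaction) error {
		fmt.Printf("txn %d: to %s, value %s, %d approvals so far\n",
			id, txn.To, txn.Value, len(txn.Approved))
		return nil
	})
}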
109
chain/actors/builtin/paych/message12.go
generated
Normal file
@ -0,0 +1,109 @@
|
||||
package paych
|
||||
|
||||
import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
builtin12 "github.com/filecoin-project/go-state-types/builtin"
|
||||
init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
|
||||
paych12 "github.com/filecoin-project/go-state-types/builtin/v12/paych"
|
||||
paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
type message12 struct{ from address.Address }
|
||||
|
||||
func (m message12) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) {
|
||||
|
||||
actorCodeID, ok := actors.GetActorCodeID(actorstypes.Version12, "paymentchannel")
|
||||
if !ok {
|
||||
return nil, xerrors.Errorf("error getting actor paymentchannel code id for actor version %d", 12)
|
||||
}
|
||||
|
||||
params, aerr := actors.SerializeParams(&paych12.ConstructorParams{From: m.from, To: to})
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
enc, aerr := actors.SerializeParams(&init12.ExecParams{
|
||||
CodeCID: actorCodeID,
|
||||
ConstructorParams: params,
|
||||
})
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
|
||||
return &types.Message{
|
||||
To: init_.Address,
|
||||
From: m.from,
|
||||
Value: initialAmount,
|
||||
Method: builtin12.MethodsInit.Exec,
|
||||
Params: enc,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m message12) Update(paych address.Address, sv *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) {
|
||||
params, aerr := actors.SerializeParams(&paych12.UpdateChannelStateParams{
|
||||
|
||||
Sv: toV12SignedVoucher(*sv),
|
||||
|
||||
Secret: secret,
|
||||
})
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
|
||||
return &types.Message{
|
||||
To: paych,
|
||||
From: m.from,
|
||||
Value: abi.NewTokenAmount(0),
|
||||
Method: builtin12.MethodsPaych.UpdateChannelState,
|
||||
Params: params,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func toV12SignedVoucher(sv paychtypes.SignedVoucher) paych12.SignedVoucher {
|
||||
merges := make([]paych12.Merge, len(sv.Merges))
|
||||
for i := range sv.Merges {
|
||||
merges[i] = paych12.Merge{
|
||||
Lane: sv.Merges[i].Lane,
|
||||
Nonce: sv.Merges[i].Nonce,
|
||||
}
|
||||
}
|
||||
|
||||
return paych12.SignedVoucher{
|
||||
ChannelAddr: sv.ChannelAddr,
|
||||
TimeLockMin: sv.TimeLockMin,
|
||||
TimeLockMax: sv.TimeLockMax,
|
||||
SecretHash: sv.SecretHash,
|
||||
Extra: (*paych12.ModVerifyParams)(sv.Extra),
|
||||
Lane: sv.Lane,
|
||||
Nonce: sv.Nonce,
|
||||
Amount: sv.Amount,
|
||||
MinSettleHeight: sv.MinSettleHeight,
|
||||
Merges: merges,
|
||||
Signature: sv.Signature,
|
||||
}
|
||||
}
|
||||
|
||||
func (m message12) Settle(paych address.Address) (*types.Message, error) {
|
||||
return &types.Message{
|
||||
To: paych,
|
||||
From: m.from,
|
||||
Value: abi.NewTokenAmount(0),
|
||||
Method: builtin12.MethodsPaych.Settle,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m message12) Collect(paych address.Address) (*types.Message, error) {
|
||||
return &types.Message{
|
||||
To: paych,
|
||||
From: m.from,
|
||||
Value: abi.NewTokenAmount(0),
|
||||
Method: builtin12.MethodsPaych.Collect,
|
||||
}, nil
|
||||
}
|
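Aside (illustrative, not part of the commit): Update still accepts the shared v8 paychtypes.SignedVoucher and only re-encodes it via toV12SignedVoucher, so paychmgr callers are untouched. A Go sketch of the builder methods defined above, assuming paych.Message dispatches to message12 as in the hunk that follows; the package and helper names are placeholders.

package example

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	actorstypes "github.com/filecoin-project/go-state-types/actors"

	"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
	"github.com/filecoin-project/lotus/chain/types"
)

// channelLifecycle is a hypothetical helper building the create, settle and
// collect messages through the version-selected builder.
func channelLifecycle(from, to, ch address.Address, amt abi.TokenAmount) ([]*types.Message, error) {
	b := paych.Message(actorstypes.Version12, from)
	create, err := b.Create(to, amt) // init-actor Exec carrying the paych12 constructor params
	if err != nil {
		return nil, err
	}
	settle, err := b.Settle(ch)
	if err != nil {
		return nil, err
	}
	collect, err := b.Collect(ch)
	if err != nil {
		return nil, err
	}
	return []*types.Message{create, settle, collect}, nil
}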
@ -50,6 +50,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
	case actorstypes.Version11:
		return load11(store, act.Head)

	case actorstypes.Version12:
		return load12(store, act.Head)

	}
}

@ -167,6 +170,9 @@ func Message(version actorstypes.Version, from address.Address) MessageBuilder {
	case actorstypes.Version11:
		return message11{from}

	case actorstypes.Version12:
		return message12{from}

	default:
		panic(fmt.Sprintf("unsupported actors version: %d", version))
	}
@ -208,5 +214,6 @@ func AllCodes() []cid.Cid {
		(&state9{}).Code(),
		(&state10{}).Code(),
		(&state11{}).Code(),
		(&state12{}).Code(),
	}
}
135
chain/actors/builtin/paych/v12.go
generated
Normal file
@ -0,0 +1,135 @@
|
||||
package paych
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
paych12 "github.com/filecoin-project/go-state-types/builtin/v12/paych"
|
||||
adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
|
||||
"github.com/filecoin-project/go-state-types/manifest"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state12)(nil)
|
||||
|
||||
func load12(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state12{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make12(store adt.Store) (State, error) {
|
||||
out := state12{store: store}
|
||||
out.State = paych12.State{}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state12 struct {
|
||||
paych12.State
|
||||
store adt.Store
|
||||
lsAmt *adt12.Array
|
||||
}
|
||||
|
||||
// Channel owner, who has funded the actor
|
||||
func (s *state12) From() (address.Address, error) {
|
||||
return s.State.From, nil
|
||||
}
|
||||
|
||||
// Recipient of payouts from channel
|
||||
func (s *state12) To() (address.Address, error) {
|
||||
return s.State.To, nil
|
||||
}
|
||||
|
||||
// Height at which the channel can be `Collected`
|
||||
func (s *state12) SettlingAt() (abi.ChainEpoch, error) {
|
||||
return s.State.SettlingAt, nil
|
||||
}
|
||||
|
||||
// Amount successfully redeemed through the payment channel, paid out on `Collect()`
|
||||
func (s *state12) ToSend() (abi.TokenAmount, error) {
|
||||
return s.State.ToSend, nil
|
||||
}
|
||||
|
||||
func (s *state12) getOrLoadLsAmt() (*adt12.Array, error) {
|
||||
if s.lsAmt != nil {
|
||||
return s.lsAmt, nil
|
||||
}
|
||||
|
||||
// Get the lane state from the chain
|
||||
lsamt, err := adt12.AsArray(s.store, s.State.LaneStates, paych12.LaneStatesAmtBitwidth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.lsAmt = lsamt
|
||||
return lsamt, nil
|
||||
}
|
||||
|
||||
// Get total number of lanes
|
||||
func (s *state12) LaneCount() (uint64, error) {
|
||||
lsamt, err := s.getOrLoadLsAmt()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return lsamt.Length(), nil
|
||||
}
|
||||
|
||||
func (s *state12) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
// Iterate lane states
|
||||
func (s *state12) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
|
||||
// Get the lane state from the chain
|
||||
lsamt, err := s.getOrLoadLsAmt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Note: we use a map instead of an array to store laneStates because the
|
||||
// client sets the lane ID (the index) and potentially they could use a
|
||||
// very large index.
|
||||
var ls paych12.LaneState
|
||||
return lsamt.ForEach(&ls, func(i int64) error {
|
||||
return cb(uint64(i), &laneState12{ls})
|
||||
})
|
||||
}
|
||||
|
||||
type laneState12 struct {
|
||||
paych12.LaneState
|
||||
}
|
||||
|
||||
func (ls *laneState12) Redeemed() (big.Int, error) {
|
||||
return ls.LaneState.Redeemed, nil
|
||||
}
|
||||
|
||||
func (ls *laneState12) Nonce() (uint64, error) {
|
||||
return ls.LaneState.Nonce, nil
|
||||
}
|
||||
|
||||
func (s *state12) ActorKey() string {
|
||||
return manifest.PaychKey
|
||||
}
|
||||
|
||||
func (s *state12) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version12
|
||||
}
|
||||
|
||||
func (s *state12) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
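Aside (illustrative, not part of the commit): lane states are loaded lazily by getOrLoadLsAmt and exposed through ForEachLaneState, as above. A Go sketch under that assumption; the package and helper names are placeholders.

package example

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
)

// dumpLanes is a hypothetical helper iterating the LaneStates AMT via the
// State interface implemented by state12 above.
func dumpLanes(st paych.State) error {
	return st.ForEachLaneState(func(idx uint64, ls paych.LaneState) error {
		redeemed, err := ls.Redeemed()
		if err != nil {
			return err
		}
		nonce, err := ls.Nonce()
		if err != nil {
			return err
		}
		fmt.Printf("lane %d: redeemed %s, nonce %d\n", idx, redeemed, nonce)
		return nil
	})
}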
@ -9,7 +9,7 @@ import (
	"github.com/filecoin-project/go-state-types/abi"
	actorstypes "github.com/filecoin-project/go-state-types/actors"
	"github.com/filecoin-project/go-state-types/big"
	builtin11 "github.com/filecoin-project/go-state-types/builtin"
	builtin12 "github.com/filecoin-project/go-state-types/builtin"
	"github.com/filecoin-project/go-state-types/cbor"
	"github.com/filecoin-project/go-state-types/manifest"
	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@ -27,8 +27,8 @@ import (
)

var (
	Address = builtin11.StoragePowerActorAddr
	Methods = builtin11.MethodsPower
	Address = builtin12.StoragePowerActorAddr
	Methods = builtin12.MethodsPower
)

func Load(store adt.Store, act *types.Actor) (State, error) {
@ -51,6 +51,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
	case actorstypes.Version11:
		return load11(store, act.Head)

	case actorstypes.Version12:
		return load12(store, act.Head)

	}
}

@ -118,6 +121,9 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) {
	case actorstypes.Version11:
		return make11(store)

	case actorstypes.Version12:
		return make12(store)

	}
	return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -183,5 +189,6 @@ func AllCodes() []cid.Cid {
		(&state9{}).Code(),
		(&state10{}).Code(),
		(&state11{}).Code(),
		(&state12{}).Code(),
	}
}
207
chain/actors/builtin/power/v12.go
generated
Normal file
@ -0,0 +1,207 @@
|
||||
package power
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
builtin12 "github.com/filecoin-project/go-state-types/builtin"
|
||||
power12 "github.com/filecoin-project/go-state-types/builtin/v12/power"
|
||||
adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
|
||||
"github.com/filecoin-project/go-state-types/manifest"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
)
|
||||
|
||||
var _ State = (*state12)(nil)
|
||||
|
||||
func load12(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state12{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make12(store adt.Store) (State, error) {
|
||||
out := state12{store: store}
|
||||
|
||||
s, err := power12.ConstructState(store)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out.State = *s
|
||||
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state12 struct {
|
||||
power12.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state12) TotalLocked() (abi.TokenAmount, error) {
|
||||
return s.TotalPledgeCollateral, nil
|
||||
}
|
||||
|
||||
func (s *state12) TotalPower() (Claim, error) {
|
||||
return Claim{
|
||||
RawBytePower: s.TotalRawBytePower,
|
||||
QualityAdjPower: s.TotalQualityAdjPower,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Committed power to the network. Includes miners below the minimum threshold.
|
||||
func (s *state12) TotalCommitted() (Claim, error) {
|
||||
return Claim{
|
||||
RawBytePower: s.TotalBytesCommitted,
|
||||
QualityAdjPower: s.TotalQABytesCommitted,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *state12) MinerPower(addr address.Address) (Claim, bool, error) {
|
||||
claims, err := s.claims()
|
||||
if err != nil {
|
||||
return Claim{}, false, err
|
||||
}
|
||||
var claim power12.Claim
|
||||
ok, err := claims.Get(abi.AddrKey(addr), &claim)
|
||||
if err != nil {
|
||||
return Claim{}, false, err
|
||||
}
|
||||
return Claim{
|
||||
RawBytePower: claim.RawBytePower,
|
||||
QualityAdjPower: claim.QualityAdjPower,
|
||||
}, ok, nil
|
||||
}
|
||||
|
||||
func (s *state12) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) {
|
||||
return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a)
|
||||
}
|
||||
|
||||
func (s *state12) TotalPowerSmoothed() (builtin.FilterEstimate, error) {
|
||||
return builtin.FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil
|
||||
}
|
||||
|
||||
func (s *state12) MinerCounts() (uint64, uint64, error) {
|
||||
return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil
|
||||
}
|
||||
|
||||
func (s *state12) ListAllMiners() ([]address.Address, error) {
|
||||
claims, err := s.claims()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var miners []address.Address
|
||||
err = claims.ForEach(nil, func(k string) error {
|
||||
a, err := address.NewFromBytes([]byte(k))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
miners = append(miners, a)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return miners, nil
|
||||
}
|
||||
|
||||
func (s *state12) ForEachClaim(cb func(miner address.Address, claim Claim) error) error {
|
||||
claims, err := s.claims()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var claim power12.Claim
|
||||
return claims.ForEach(&claim, func(k string) error {
|
||||
a, err := address.NewFromBytes([]byte(k))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cb(a, Claim{
|
||||
RawBytePower: claim.RawBytePower,
|
||||
QualityAdjPower: claim.QualityAdjPower,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func (s *state12) ClaimsChanged(other State) (bool, error) {
|
||||
other12, ok := other.(*state12)
|
||||
if !ok {
|
||||
// treat an upgrade as a change, always
|
||||
return true, nil
|
||||
}
|
||||
return !s.State.Claims.Equals(other12.State.Claims), nil
|
||||
}
|
||||
|
||||
func (s *state12) SetTotalQualityAdjPower(p abi.StoragePower) error {
|
||||
s.State.TotalQualityAdjPower = p
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state12) SetTotalRawBytePower(p abi.StoragePower) error {
|
||||
s.State.TotalRawBytePower = p
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state12) SetThisEpochQualityAdjPower(p abi.StoragePower) error {
|
||||
s.State.ThisEpochQualityAdjPower = p
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state12) SetThisEpochRawBytePower(p abi.StoragePower) error {
|
||||
s.State.ThisEpochRawBytePower = p
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *state12) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state12) claims() (adt.Map, error) {
|
||||
return adt12.AsMap(s.store, s.Claims, builtin12.DefaultHamtBitwidth)
|
||||
}
|
||||
|
||||
func (s *state12) decodeClaim(val *cbg.Deferred) (Claim, error) {
|
||||
var ci power12.Claim
|
||||
if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
|
||||
return Claim{}, err
|
||||
}
|
||||
return fromV12Claim(ci), nil
|
||||
}
|
||||
|
||||
func fromV12Claim(v12 power12.Claim) Claim {
|
||||
return Claim{
|
||||
RawBytePower: v12.RawBytePower,
|
||||
QualityAdjPower: v12.QualityAdjPower,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state12) ActorKey() string {
|
||||
return manifest.PowerKey
|
||||
}
|
||||
|
||||
func (s *state12) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version12
|
||||
}
|
||||
|
||||
func (s *state12) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
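Aside (illustrative, not part of the commit): claims are keyed by miner address in a HAMT, and MinerPower/TotalPower above hide that layout behind the Claim struct. A Go sketch under that assumption; the package and helper names are placeholders.

package example

import (
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
)

// reportPower is a hypothetical helper comparing one miner's claim against
// the network totals, using only methods state12 implements above.
func reportPower(st power.State, maddr address.Address) error {
	claim, found, err := st.MinerPower(maddr)
	if err != nil {
		return err
	}
	if !found {
		return fmt.Errorf("no claim recorded for %s", maddr)
	}
	total, err := st.TotalPower()
	if err != nil {
		return err
	}
	fmt.Printf("%s: %s raw / %s QA out of %s network QA power\n",
		maddr, claim.RawBytePower, claim.QualityAdjPower, total.QualityAdjPower)
	return nil
}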
@ -42,6 +42,22 @@ import (
|
||||
reward11 "github.com/filecoin-project/go-state-types/builtin/v11/reward"
|
||||
system11 "github.com/filecoin-project/go-state-types/builtin/v11/system"
|
||||
verifreg11 "github.com/filecoin-project/go-state-types/builtin/v11/verifreg"
|
||||
account12 "github.com/filecoin-project/go-state-types/builtin/v12/account"
|
||||
cron12 "github.com/filecoin-project/go-state-types/builtin/v12/cron"
|
||||
datacap12 "github.com/filecoin-project/go-state-types/builtin/v12/datacap"
|
||||
eam12 "github.com/filecoin-project/go-state-types/builtin/v12/eam"
|
||||
ethaccount12 "github.com/filecoin-project/go-state-types/builtin/v12/ethaccount"
|
||||
evm12 "github.com/filecoin-project/go-state-types/builtin/v12/evm"
|
||||
_init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
|
||||
market12 "github.com/filecoin-project/go-state-types/builtin/v12/market"
|
||||
miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner"
|
||||
multisig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig"
|
||||
paych12 "github.com/filecoin-project/go-state-types/builtin/v12/paych"
|
||||
placeholder12 "github.com/filecoin-project/go-state-types/builtin/v12/placeholder"
|
||||
power12 "github.com/filecoin-project/go-state-types/builtin/v12/power"
|
||||
reward12 "github.com/filecoin-project/go-state-types/builtin/v12/reward"
|
||||
system12 "github.com/filecoin-project/go-state-types/builtin/v12/system"
|
||||
verifreg12 "github.com/filecoin-project/go-state-types/builtin/v12/verifreg"
|
||||
account8 "github.com/filecoin-project/go-state-types/builtin/v8/account"
|
||||
cron8 "github.com/filecoin-project/go-state-types/builtin/v8/cron"
|
||||
_init8 "github.com/filecoin-project/go-state-types/builtin/v8/init"
|
||||
@ -497,6 +513,110 @@ func MakeRegistry(av actorstypes.Version) []RegistryEntry {
|
||||
}
|
||||
}
|
||||
|
||||
case actorstypes.Version12:
|
||||
for key, codeID := range codeIDs {
|
||||
switch key {
|
||||
case manifest.AccountKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: account12.Methods,
|
||||
state: new(account12.State),
|
||||
})
|
||||
case manifest.CronKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: cron12.Methods,
|
||||
state: new(cron12.State),
|
||||
})
|
||||
case manifest.InitKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: _init12.Methods,
|
||||
state: new(_init12.State),
|
||||
})
|
||||
case manifest.MarketKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: market12.Methods,
|
||||
state: new(market12.State),
|
||||
})
|
||||
case manifest.MinerKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: miner12.Methods,
|
||||
state: new(miner12.State),
|
||||
})
|
||||
case manifest.MultisigKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: multisig12.Methods,
|
||||
state: new(multisig12.State),
|
||||
})
|
||||
case manifest.PaychKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: paych12.Methods,
|
||||
state: new(paych12.State),
|
||||
})
|
||||
case manifest.PowerKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: power12.Methods,
|
||||
state: new(power12.State),
|
||||
})
|
||||
case manifest.RewardKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: reward12.Methods,
|
||||
state: new(reward12.State),
|
||||
})
|
||||
case manifest.SystemKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: system12.Methods,
|
||||
state: new(system12.State),
|
||||
})
|
||||
case manifest.VerifregKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: verifreg12.Methods,
|
||||
state: new(verifreg12.State),
|
||||
})
|
||||
case manifest.DatacapKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: datacap12.Methods,
|
||||
state: new(datacap12.State),
|
||||
})
|
||||
|
||||
case manifest.EvmKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: evm12.Methods,
|
||||
state: new(evm12.State),
|
||||
})
|
||||
case manifest.EamKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: eam12.Methods,
|
||||
state: nil,
|
||||
})
|
||||
case manifest.PlaceholderKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: placeholder12.Methods,
|
||||
state: nil,
|
||||
})
|
||||
case manifest.EthAccountKey:
|
||||
registry = append(registry, RegistryEntry{
|
||||
code: codeID,
|
||||
methods: ethaccount12.Methods,
|
||||
state: nil,
|
||||
})
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
panic("expected version v8 and up only, use specs-actors for v0-7")
|
||||
}
|
||||
|
@ -6,7 +6,7 @@ import (

	"github.com/filecoin-project/go-state-types/abi"
	actorstypes "github.com/filecoin-project/go-state-types/actors"
	builtin11 "github.com/filecoin-project/go-state-types/builtin"
	builtin12 "github.com/filecoin-project/go-state-types/builtin"
	"github.com/filecoin-project/go-state-types/cbor"
	"github.com/filecoin-project/go-state-types/manifest"
	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@ -25,8 +25,8 @@ import (
)

var (
	Address = builtin11.RewardActorAddr
	Methods = builtin11.MethodsReward
	Address = builtin12.RewardActorAddr
	Methods = builtin12.MethodsReward
)

func Load(store adt.Store, act *types.Actor) (State, error) {
@ -49,6 +49,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
	case actorstypes.Version11:
		return load11(store, act.Head)

	case actorstypes.Version12:
		return load12(store, act.Head)

	}
}

@ -116,6 +119,9 @@ func MakeState(store adt.Store, av actorstypes.Version, currRealizedPower abi.St
	case actorstypes.Version11:
		return make11(store, currRealizedPower)

	case actorstypes.Version12:
		return make12(store, currRealizedPower)

	}
	return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -159,5 +165,6 @@ func AllCodes() []cid.Cid {
		(&state9{}).Code(),
		(&state10{}).Code(),
		(&state11{}).Code(),
		(&state12{}).Code(),
	}
}
120
chain/actors/builtin/reward/v12.go
generated
Normal file
@ -0,0 +1,120 @@
package reward
import (
"fmt"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner"
reward12 "github.com/filecoin-project/go-state-types/builtin/v12/reward"
smoothing12 "github.com/filecoin-project/go-state-types/builtin/v12/util/smoothing"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin"
)
var _ State = (*state12)(nil)
func load12(store adt.Store, root cid.Cid) (State, error) {
out := state12{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make12(store adt.Store, currRealizedPower abi.StoragePower) (State, error) {
out := state12{store: store}
out.State = *reward12.ConstructState(currRealizedPower)
return &out, nil
}
type state12 struct {
reward12.State
store adt.Store
}
func (s *state12) ThisEpochReward() (abi.TokenAmount, error) {
return s.State.ThisEpochReward, nil
}
func (s *state12) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
return builtin.FilterEstimate{
PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate,
VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate,
}, nil
}
func (s *state12) ThisEpochBaselinePower() (abi.StoragePower, error) {
return s.State.ThisEpochBaselinePower, nil
}
func (s *state12) TotalStoragePowerReward() (abi.TokenAmount, error) {
return s.State.TotalStoragePowerReward, nil
}
func (s *state12) EffectiveBaselinePower() (abi.StoragePower, error) {
return s.State.EffectiveBaselinePower, nil
}
func (s *state12) EffectiveNetworkTime() (abi.ChainEpoch, error) {
return s.State.EffectiveNetworkTime, nil
}
func (s *state12) CumsumBaseline() (reward12.Spacetime, error) {
return s.State.CumsumBaseline, nil
}
func (s *state12) CumsumRealized() (reward12.Spacetime, error) {
return s.State.CumsumRealized, nil
}
func (s *state12) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
return miner12.InitialPledgeForPower(
qaPower,
s.State.ThisEpochBaselinePower,
s.State.ThisEpochRewardSmoothed,
smoothing12.FilterEstimate{
PositionEstimate: networkQAPower.PositionEstimate,
VelocityEstimate: networkQAPower.VelocityEstimate,
},
circSupply,
), nil
}
func (s *state12) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) {
return miner12.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed,
smoothing12.FilterEstimate{
PositionEstimate: networkQAPower.PositionEstimate,
VelocityEstimate: networkQAPower.VelocityEstimate,
},
sectorWeight), nil
}
func (s *state12) GetState() interface{} {
return &s.State
}
func (s *state12) ActorKey() string {
return manifest.RewardKey
}
func (s *state12) ActorVersion() actorstypes.Version {
return actorstypes.Version12
}
func (s *state12) Code() cid.Cid {
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
if !ok {
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
}
return code
}
@ -5,7 +5,7 @@ import (
"golang.org/x/xerrors"
actorstypes "github.com/filecoin-project/go-state-types/actors"
builtin11 "github.com/filecoin-project/go-state-types/builtin"
builtin12 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
@ -21,7 +21,7 @@ import (
)
var (
Address = builtin11.SystemActorAddr
Address = builtin12.SystemActorAddr
)
func Load(store adt.Store, act *types.Actor) (State, error) {
@ -44,6 +44,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version11:
return load11(store, act.Head)
case actorstypes.Version12:
return load12(store, act.Head)
}
}
@ -111,6 +114,9 @@ func MakeState(store adt.Store, av actorstypes.Version, builtinActors cid.Cid) (
case actorstypes.Version11:
return make11(store, builtinActors)
case actorstypes.Version12:
return make12(store, builtinActors)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -138,5 +144,6 @@ func AllCodes() []cid.Cid {
(&state9{}).Code(),
(&state10{}).Code(),
(&state11{}).Code(),
(&state12{}).Code(),
}
}
72 chain/actors/builtin/system/v12.go generated Normal file
@ -0,0 +1,72 @@
package system
import (
"fmt"
"github.com/ipfs/go-cid"
actorstypes "github.com/filecoin-project/go-state-types/actors"
system12 "github.com/filecoin-project/go-state-types/builtin/v12/system"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
)
var _ State = (*state12)(nil)
func load12(store adt.Store, root cid.Cid) (State, error) {
out := state12{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make12(store adt.Store, builtinActors cid.Cid) (State, error) {
out := state12{store: store}
out.State = system12.State{
BuiltinActors: builtinActors,
}
return &out, nil
}
type state12 struct {
system12.State
store adt.Store
}
func (s *state12) GetState() interface{} {
return &s.State
}
func (s *state12) GetBuiltinActors() cid.Cid {
return s.State.BuiltinActors
}
func (s *state12) SetBuiltinActors(c cid.Cid) error {
s.State.BuiltinActors = c
return nil
}
func (s *state12) ActorKey() string {
return manifest.SystemKey
}
func (s *state12) ActorVersion() actorstypes.Version {
return actorstypes.Version12
}
func (s *state12) Code() cid.Cid {
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
if !ok {
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
}
return code
}
170 chain/actors/builtin/verifreg/v12.go generated Normal file
@ -0,0 +1,170 @@
package verifreg
import (
"fmt"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/big"
builtin12 "github.com/filecoin-project/go-state-types/builtin"
adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
verifreg12 "github.com/filecoin-project/go-state-types/builtin/v12/verifreg"
verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
)
var _ State = (*state12)(nil)
func load12(store adt.Store, root cid.Cid) (State, error) {
out := state12{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make12(store adt.Store, rootKeyAddress address.Address) (State, error) {
out := state12{store: store}
s, err := verifreg12.ConstructState(store, rootKeyAddress)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state12 struct {
verifreg12.State
store adt.Store
}
func (s *state12) RootKey() (address.Address, error) {
return s.State.RootKey, nil
}
func (s *state12) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
return false, big.Zero(), xerrors.Errorf("unsupported in actors v12")
}
func (s *state12) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
return getDataCap(s.store, actors.Version12, s.verifiers, addr)
}
func (s *state12) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) {
return getRemoveDataCapProposalID(s.store, actors.Version12, s.removeDataCapProposalIDs, verifier, client)
}
func (s *state12) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return forEachCap(s.store, actors.Version12, s.verifiers, cb)
}
func (s *state12) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return xerrors.Errorf("unsupported in actors v12")
}
func (s *state12) verifiedClients() (adt.Map, error) {
return nil, xerrors.Errorf("unsupported in actors v12")
}
func (s *state12) verifiers() (adt.Map, error) {
return adt12.AsMap(s.store, s.Verifiers, builtin12.DefaultHamtBitwidth)
}
func (s *state12) removeDataCapProposalIDs() (adt.Map, error) {
return adt12.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin12.DefaultHamtBitwidth)
}
func (s *state12) GetState() interface{} {
return &s.State
}
func (s *state12) GetAllocation(clientIdAddr address.Address, allocationId verifreg9.AllocationId) (*Allocation, bool, error) {
alloc, ok, err := s.FindAllocation(s.store, clientIdAddr, verifreg12.AllocationId(allocationId))
return (*Allocation)(alloc), ok, err
}
func (s *state12) GetAllocations(clientIdAddr address.Address) (map[AllocationId]Allocation, error) {
v12Map, err := s.LoadAllocationsToMap(s.store, clientIdAddr)
retMap := make(map[AllocationId]Allocation, len(v12Map))
for k, v := range v12Map {
retMap[AllocationId(k)] = Allocation(v)
}
return retMap, err
}
func (s *state12) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) {
claim, ok, err := s.FindClaim(s.store, providerIdAddr, verifreg12.ClaimId(claimId))
return (*Claim)(claim), ok, err
}
func (s *state12) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, error) {
v12Map, err := s.LoadClaimsToMap(s.store, providerIdAddr)
retMap := make(map[ClaimId]Claim, len(v12Map))
for k, v := range v12Map {
retMap[ClaimId(k)] = Claim(v)
}
return retMap, err
}
func (s *state12) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) {
v12Map, err := s.LoadClaimsToMap(s.store, providerIdAddr)
retMap := make(map[abi.SectorNumber][]ClaimId)
for k, v := range v12Map {
claims, ok := retMap[v.Sector]
if !ok {
retMap[v.Sector] = []ClaimId{ClaimId(k)}
} else {
retMap[v.Sector] = append(claims, ClaimId(k))
}
}
return retMap, err
}
func (s *state12) ActorKey() string {
return manifest.VerifregKey
}
func (s *state12) ActorVersion() actorstypes.Version {
return actorstypes.Version12
}
func (s *state12) Code() cid.Cid {
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
if !ok {
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
}
return code
}
13 chain/actors/builtin/verifreg/verifreg.go generated
@ -7,7 +7,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
builtin11 "github.com/filecoin-project/go-state-types/builtin"
builtin12 "github.com/filecoin-project/go-state-types/builtin"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest"
@ -25,8 +25,8 @@ import (
)
var (
Address = builtin11.VerifiedRegistryActorAddr
Methods = builtin11.MethodsVerifiedRegistry
Address = builtin12.VerifiedRegistryActorAddr
Methods = builtin12.MethodsVerifiedRegistry
)
func Load(store adt.Store, act *types.Actor) (State, error) {
@ -49,6 +49,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version11:
return load11(store, act.Head)
case actorstypes.Version12:
return load12(store, act.Head)
}
}
@ -116,6 +119,9 @@ func MakeState(store adt.Store, av actorstypes.Version, rootKeyAddress address.A
case actorstypes.Version11:
return make11(store, rootKeyAddress)
case actorstypes.Version12:
return make12(store, rootKeyAddress)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@ -154,6 +160,7 @@ func AllCodes() []cid.Cid {
(&state9{}).Code(),
(&state10{}).Code(),
(&state11{}).Code(),
(&state12{}).Code(),
}
}
@ -8,6 +8,7 @@ import (
"github.com/filecoin-project/go-state-types/big"
builtin10 "github.com/filecoin-project/go-state-types/builtin"
builtin11 "github.com/filecoin-project/go-state-types/builtin"
builtin12 "github.com/filecoin-project/go-state-types/builtin"
builtin8 "github.com/filecoin-project/go-state-types/builtin"
builtin9 "github.com/filecoin-project/go-state-types/builtin"
market10 "github.com/filecoin-project/go-state-types/builtin/v10/market"
@ -15,8 +16,11 @@ import (
verifreg10 "github.com/filecoin-project/go-state-types/builtin/v10/verifreg"
market11 "github.com/filecoin-project/go-state-types/builtin/v11/market"
miner11 "github.com/filecoin-project/go-state-types/builtin/v11/miner"
paych11 "github.com/filecoin-project/go-state-types/builtin/v11/paych"
verifreg11 "github.com/filecoin-project/go-state-types/builtin/v11/verifreg"
market12 "github.com/filecoin-project/go-state-types/builtin/v12/market"
miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner"
paych12 "github.com/filecoin-project/go-state-types/builtin/v12/paych"
verifreg12 "github.com/filecoin-project/go-state-types/builtin/v12/verifreg"
market8 "github.com/filecoin-project/go-state-types/builtin/v8/market"
miner8 "github.com/filecoin-project/go-state-types/builtin/v8/miner"
verifreg8 "github.com/filecoin-project/go-state-types/builtin/v8/verifreg"
@ -55,14 +59,14 @@ import (
)
const (
ChainFinality = miner11.ChainFinality
ChainFinality = miner12.ChainFinality
SealRandomnessLookback = ChainFinality
PaychSettleDelay = paych11.SettleDelay
MaxPreCommitRandomnessLookback = builtin11.EpochsInDay + SealRandomnessLookback
PaychSettleDelay = paych12.SettleDelay
MaxPreCommitRandomnessLookback = builtin12.EpochsInDay + SealRandomnessLookback
)
var (
MarketDefaultAllocationTermBuffer = market11.MarketDefaultAllocationTermBuffer
MarketDefaultAllocationTermBuffer = market12.MarketDefaultAllocationTermBuffer
)
// SetSupportedProofTypes sets supported proof types, across all actor versions.
@ -175,11 +179,13 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) {
miner11.PreCommitChallengeDelay = delay
miner12.PreCommitChallengeDelay = delay
}
// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay.
func GetPreCommitChallengeDelay() abi.ChainEpoch {
return miner11.PreCommitChallengeDelay
return miner12.PreCommitChallengeDelay
}
// SetConsensusMinerMinPower sets the minimum power of an individual miner must
@ -229,6 +235,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin12.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
}
// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should
@ -257,6 +267,8 @@ func SetMinVerifiedDealSize(size abi.StoragePower) {
verifreg11.MinVerifiedDealSize = size
verifreg12.MinVerifiedDealSize = size
}
func GetMaxProveCommitDuration(ver actorstypes.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) {
@ -306,6 +318,10 @@ func GetMaxProveCommitDuration(ver actorstypes.Version, t abi.RegisteredSealProo
return miner11.MaxProveCommitDuration[t], nil
case actorstypes.Version12:
return miner12.MaxProveCommitDuration[t], nil
default:
return 0, xerrors.Errorf("unsupported actors version")
}
@ -366,6 +382,11 @@ func SetProviderCollateralSupplyTarget(num, denom big.Int) {
Denominator: denom,
}
market12.ProviderCollateralSupplyTarget = builtin12.BigFrac{
Numerator: num,
Denominator: denom,
}
}
func DealProviderCollateralBounds(
@ -434,13 +455,18 @@ func DealProviderCollateralBounds(
min, max := market11.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version12:
min, max := market12.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
default:
return big.Zero(), big.Zero(), xerrors.Errorf("unsupported actors version")
}
}
func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) {
return market11.DealDurationBounds(pieceSize)
return market12.DealDurationBounds(pieceSize)
}
// Sets the challenge window and scales the proving period to match (such that
@ -516,6 +542,13 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) {
// scale it if we're scaling the challenge period.
miner11.WPoStDisputeWindow = period * 30
miner12.WPoStChallengeWindow = period
miner12.WPoStProvingPeriod = period * abi.ChainEpoch(miner12.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner12.WPoStDisputeWindow = period * 30
}
func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
@ -528,15 +561,15 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
}
func GetMaxSectorExpirationExtension() abi.ChainEpoch {
return miner11.MaxSectorExpirationExtension
return miner12.MaxSectorExpirationExtension
}
func GetMinSectorExpiration() abi.ChainEpoch {
return miner11.MinSectorExpiration
return miner12.MinSectorExpiration
}
func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) {
sectorsPerPart, err := builtin11.PoStProofWindowPoStPartitionSectors(p)
sectorsPerPart, err := builtin12.PoStProofWindowPoStPartitionSectors(p)
if err != nil {
return 0, err
}
@ -556,7 +589,7 @@ func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version)
return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime
}
return builtin11.SealProofPoliciesV11[proof].SectorMaxLifetime
return builtin12.SealProofPoliciesV11[proof].SectorMaxLifetime
}
func GetAddressedSectorsMax(nwVer network.Version) (int, error) {
@ -599,6 +632,9 @@ func GetAddressedSectorsMax(nwVer network.Version) (int, error) {
case actorstypes.Version11:
return miner11.AddressedSectorsMax, nil
case actorstypes.Version12:
return miner12.AddressedSectorsMax, nil
default:
return 0, xerrors.Errorf("unsupported network version")
}
@ -656,6 +692,10 @@ func GetDeclarationsMax(nwVer network.Version) (int, error) {
return miner11.DeclarationsMax, nil
case actorstypes.Version12:
return miner12.DeclarationsMax, nil
default:
return 0, xerrors.Errorf("unsupported network version")
}
@ -712,6 +752,10 @@ func AggregateProveCommitNetworkFee(nwVer network.Version, aggregateSize int, ba
return miner11.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil
case actorstypes.Version12:
return miner12.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil
default:
return big.Zero(), xerrors.Errorf("unsupported network version")
}
@ -768,6 +812,10 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base
return miner11.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
case actorstypes.Version12:
return miner12.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
default:
return big.Zero(), xerrors.Errorf("unsupported network version")
}
@ -14,9 +14,9 @@ const ({{range .actorVersions}}
/* inline-gen start */
var LatestVersion = 11
var LatestVersion = 12
var Versions = []int{0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
var Versions = []int{0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}
const (
Version0 Version = 0
@ -30,6 +30,7 @@ const (
Version9 Version = 9
Version10 Version = 10
Version11 Version = 11
Version12 Version = 12
)
/* inline-gen end */
@ -3,14 +3,14 @@ package chain
import (
"fmt"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/hashicorp/golang-lru/arc/v2"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/build"
)
type BadBlockCache struct {
badBlocks *lru.ARCCache[cid.Cid, BadBlockReason]
badBlocks *arc.ARCCache[cid.Cid, BadBlockReason]
}
type BadBlockReason struct {
@ -43,7 +43,7 @@ func (bbr BadBlockReason) String() string {
}
func NewBadBlockCache() *BadBlockCache {
cache, err := lru.NewARC[cid.Cid, BadBlockReason](build.BadBlockCacheSize)
cache, err := arc.NewARC[cid.Cid, BadBlockReason](build.BadBlockCacheSize)
if err != nil {
panic(err) // ok
}
@ -29,19 +29,6 @@ import (
var log = logging.Logger("drand")
type drandPeer struct {
addr string
tls bool
}
func (dp *drandPeer) Address() string {
return dp.addr
}
func (dp *drandPeer) IsTLS() bool {
return dp.tls
}
// DrandBeacon connects Lotus with a drand network in order to provide
// randomness to the system in a way that's aligned with Filecoin rounds/epochs.
//
@ -235,3 +222,16 @@ func (db *DrandBeacon) maxBeaconRoundV2(latestTs uint64) uint64 {
}
var _ beacon.RandomBeacon = (*DrandBeacon)(nil)
func BeaconScheduleFromDrandSchedule(dcs dtypes.DrandSchedule, genesisTime uint64, ps *pubsub.PubSub) (beacon.Schedule, error) {
shd := beacon.Schedule{}
for _, dc := range dcs {
bc, err := NewDrandBeacon(genesisTime, build.BlockDelaySecs, ps, dc.Config)
if err != nil {
return nil, xerrors.Errorf("creating drand beacon: %w", err)
}
shd = append(shd, beacon.BeaconPoint{Start: dc.Start, Beacon: bc})
}
return shd, nil
}
@ -52,6 +52,7 @@ func NewActorRegistry() *vm.ActorRegistry {
inv.Register(actorstypes.Version9, vm.ActorsVersionPredicate(actorstypes.Version9), builtin.MakeRegistry(actorstypes.Version9))
inv.Register(actorstypes.Version10, vm.ActorsVersionPredicate(actorstypes.Version10), builtin.MakeRegistry(actorstypes.Version10))
inv.Register(actorstypes.Version11, vm.ActorsVersionPredicate(actorstypes.Version11), builtin.MakeRegistry(actorstypes.Version11))
inv.Register(actorstypes.Version12, vm.ActorsVersionPredicate(actorstypes.Version12), builtin.MakeRegistry(actorstypes.Version12))
return inv
}
@ -80,7 +81,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
pstate cid.Cid,
bms []FilecoinBlockMessages,
epoch abi.ChainEpoch,
r vm.Rand,
r rand.Rand,
em stmgr.ExecMonitor,
vmTracing bool,
baseFee abi.TokenAmount,
@ -135,6 +136,10 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
return xerrors.Errorf("running cron: %w", err)
}
if !ret.ExitCode.IsSuccess() {
return xerrors.Errorf("cron failed with exit code %d: %w", ret.ExitCode, ret.ActorErr)
}
cronGas += ret.GasUsed
if em != nil {
@ -80,6 +80,11 @@ var RewardFunc = func(ctx context.Context, vmi vm.Interface, em stmgr.ExecMonito
if actErr != nil {
return xerrors.Errorf("failed to apply reward message: %w", actErr)
}
if !ret.ExitCode.IsSuccess() {
return xerrors.Errorf("reward actor failed with exit code %d: %w", ret.ExitCode, ret.ActorErr)
}
if em != nil {
if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil {
return xerrors.Errorf("callback failed on reward message: %w", err)
@ -196,7 +201,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
return xerrors.Errorf("failed to marshal miner address to cbor: %w", err)
}
vrfBase, err := rand.DrawRandomness(rBeacon.Data, crypto.DomainSeparationTag_ElectionProofProduction, h.Height, buf.Bytes())
vrfBase, err := rand.DrawRandomnessFromBase(rBeacon.Data, crypto.DomainSeparationTag_ElectionProofProduction, h.Height, buf.Bytes())
if err != nil {
return xerrors.Errorf("could not draw randomness: %w", err)
}
@ -262,7 +267,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
beaconBase = h.BeaconEntries[len(h.BeaconEntries)-1]
}
vrfBase, err := rand.DrawRandomness(beaconBase.Data, crypto.DomainSeparationTag_TicketProduction, h.Height-build.TicketRandomnessLookback, buf.Bytes())
vrfBase, err := rand.DrawRandomnessFromBase(beaconBase.Data, crypto.DomainSeparationTag_TicketProduction, h.Height-build.TicketRandomnessLookback, buf.Bytes())
if err != nil {
return xerrors.Errorf("failed to compute vrf base for ticket: %w", err)
}
@ -340,7 +345,7 @@ func (filec *FilecoinEC) VerifyWinningPoStProof(ctx context.Context, nv network.
rbase = h.BeaconEntries[len(h.BeaconEntries)-1]
}
rand, err := rand.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes())
rand, err := rand.DrawRandomnessFromBase(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes())
if err != nil {
return xerrors.Errorf("failed to get randomness for verifying winning post proof: %w", err)
}
@ -20,6 +20,7 @@ import (
"github.com/filecoin-project/go-state-types/big"
nv18 "github.com/filecoin-project/go-state-types/builtin/v10/migration"
nv19 "github.com/filecoin-project/go-state-types/builtin/v11/migration"
nv21 "github.com/filecoin-project/go-state-types/builtin/v12/migration"
nv17 "github.com/filecoin-project/go-state-types/builtin/v9/migration"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/go-state-types/migration"
@ -261,6 +262,17 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule {
Height: build.UpgradeThunderHeight,
Network: network.Version20,
Migration: nil,
}, {
Height: build.UpgradeWatermelonHeight,
Network: network.Version21,
Migration: UpgradeActorsV12,
PreMigrations: []stmgr.PreMigration{{
PreMigration: PreUpgradeActorsV12,
StartWithin: 120,
DontStartWithin: 15,
StopWithin: 10,
}},
Expensive: true,
},
}
@ -1814,6 +1826,108 @@ func upgradeActorsV11Common(
return newRoot, nil
}
func PreUpgradeActorsV12(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error {
// Use half the CPUs for pre-migration, but leave at least 3.
workerCount := MigrationMaxWorkerCount
if workerCount <= 4 {
workerCount = 1
} else {
workerCount /= 2
}
lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch)
if err != nil {
return xerrors.Errorf("error getting lookback ts for premigration: %w", err)
}
config := migration.Config{
MaxWorkers: uint(workerCount),
ProgressLogPeriod: time.Minute * 5,
}
_, err = upgradeActorsV12Common(ctx, sm, cache, lbRoot, epoch, lbts, config)
return err
}
func UpgradeActorsV12(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
// Use all the CPUs except 2.
workerCount := MigrationMaxWorkerCount - 3
if workerCount <= 0 {
workerCount = 1
}
config := migration.Config{
MaxWorkers: uint(workerCount),
JobQueueSize: 1000,
ResultQueueSize: 100,
ProgressLogPeriod: 10 * time.Second,
}
newRoot, err := upgradeActorsV12Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
return cid.Undef, xerrors.Errorf("migrating actors v11 state: %w", err)
}
return newRoot, nil
}
func upgradeActorsV12Common(
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
config migration.Config,
) (cid.Cid, error) {
writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB/4)
adtStore := store.ActorStore(ctx, writeStore)
// ensure that the manifest is loaded in the blockstore
if err := bundle.LoadBundles(ctx, writeStore, actorstypes.Version12); err != nil {
return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err)
}
// Load the state root.
var stateRoot types.StateRoot
if err := adtStore.Get(ctx, root, &stateRoot); err != nil {
return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err)
}
if stateRoot.Version != types.StateTreeVersion5 {
return cid.Undef, xerrors.Errorf(
"expected state root version 5 for actors v12 upgrade, got %d",
stateRoot.Version,
)
}
manifest, ok := actors.GetManifest(actorstypes.Version12)
if !ok {
return cid.Undef, xerrors.Errorf("no manifest CID for v12 upgrade")
}
// Perform the migration
newHamtRoot, err := nv21.MigrateStateTree(ctx, adtStore, manifest, stateRoot.Actors, epoch, config,
migrationLogger{}, cache)
if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v12: %w", err)
}
// Persist the result.
newRoot, err := adtStore.Put(ctx, &types.StateRoot{
Version: types.StateTreeVersion5,
Actors: newHamtRoot,
Info: stateRoot.Info,
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
// Persists the new tree and shuts down the flush worker
if err := writeStore.Flush(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore flush failed: %w", err)
}
if err := writeStore.Shutdown(ctx); err != nil {
return cid.Undef, xerrors.Errorf("writeStore shutdown failed: %w", err)
}
return newRoot, nil
}
// Example upgrade function if upgrade requires only code changes
//func UpgradeActorsV9(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, _ stmgr.ExecMonitor, root cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) {
// buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())
@ -128,6 +128,16 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
// rollback the transaction (a no-op if the transaction was already committed)
defer tx.Rollback() //nolint:errcheck
// create some temporary indices to help speed up the migration
_, err = tx.Exec("CREATE INDEX IF NOT EXISTS tmp_height_tipset_key_cid ON event (height,tipset_key_cid)")
if err != nil {
return xerrors.Errorf("create index tmp_height_tipset_key_cid: %w", err)
}
_, err = tx.Exec("CREATE INDEX IF NOT EXISTS tmp_tipset_key_cid ON event (tipset_key_cid)")
if err != nil {
return xerrors.Errorf("create index tmp_tipset_key_cid: %w", err)
}
stmtDeleteOffChainEvent, err := tx.Prepare("DELETE FROM event WHERE tipset_key_cid!=? and height=?")
if err != nil {
return xerrors.Errorf("prepare stmtDeleteOffChainEvent: %w", err)
@ -158,12 +168,16 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
currTs := chainStore.GetHeaviestTipSet()
for int64(currTs.Height()) >= minHeight.Int64 {
if currTs.Height()%1000 == 0 {
log.Infof("Migrating height %d (remaining %d)", currTs.Height(), int64(currTs.Height())-minHeight.Int64)
}
tsKey := currTs.Parents()
currTs, err = chainStore.GetTipSetFromKey(ctx, tsKey)
if err != nil {
return xerrors.Errorf("get tipset from key: %w", err)
}
log.Debugf("Migrating height %d\n", currTs.Height())
log.Debugf("Migrating height %d", currTs.Height())
tsKeyCid, err := currTs.Key().Cid()
if err != nil {
@ -190,7 +204,7 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
if !eventId.Valid {
continue
}
log.Debugf("Deleting all events with id < %d at height %d\n", eventId.Int64, currTs.Height())
log.Debugf("Deleting all events with id < %d at height %d", eventId.Int64, currTs.Height())
res, err := stmtDeleteEvent.Exec(tsKeyCid.Bytes(), eventId.Int64)
if err != nil {
@ -201,7 +215,7 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
if err != nil {
return xerrors.Errorf("rows affected: %w", err)
}
log.Debugf("deleted %d events from tipset %s\n", nrRowsAffected, tsKeyCid.String())
log.Debugf("deleted %d events from tipset %s", nrRowsAffected, tsKeyCid.String())
}
// delete all entries that have an event_id that doesn't exist (since we don't have a foreign
@ -217,11 +231,34 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
}
log.Infof("cleaned up %d entries that had deleted events\n", nrRowsAffected)
// drop the temporary indices after the migration
_, err = tx.Exec("DROP INDEX IF EXISTS tmp_tipset_key_cid")
if err != nil {
return xerrors.Errorf("create index tmp_tipset_key_cid: %w", err)
}
_, err = tx.Exec("DROP INDEX IF EXISTS tmp_height_tipset_key_cid")
if err != nil {
return xerrors.Errorf("drop index tmp_height_tipset_key_cid: %w", err)
}
err = tx.Commit()
if err != nil {
return xerrors.Errorf("commit transaction: %w", err)
}
// during the migration, we have likely increased the WAL size a lot, so lets do some
// simple DB administration to free up space (VACUUM followed by truncating the WAL file)
// as this would be a good time to do it when no other writes are happening
log.Infof("Performing DB vacuum and wal checkpointing to free up space after the migration")
_, err = ei.db.Exec("VACUUM")
if err != nil {
log.Warnf("error vacuuming database: %s", err)
}
_, err = ei.db.Exec("PRAGMA wal_checkpoint(TRUNCATE)")
if err != nil {
log.Warnf("error checkpointing wal: %s", err)
}
log.Infof("Successfully migrated events to version 2 in %s", time.Since(now))
return nil
@ -4,7 +4,7 @@ import (
"context"
"sync"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/hashicorp/golang-lru/arc/v2"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/api"
@ -14,11 +14,11 @@ type messageCache struct {
api EventAPI
blockMsgLk sync.Mutex
blockMsgCache *lru.ARCCache[cid.Cid, *api.BlockMessages]
blockMsgCache *arc.ARCCache[cid.Cid, *api.BlockMessages]
}
func newMessageCache(a EventAPI) *messageCache {
blsMsgCache, _ := lru.NewARC[cid.Cid, *api.BlockMessages](500)
blsMsgCache, _ := arc.NewARC[cid.Cid, *api.BlockMessages](500)
return &messageCache{
api: a,
@ -1,25 +0,0 @@
package state
import (
"context"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
)
type contextStore struct {
ctx context.Context
cst *cbor.BasicIpldStore
}
func (cs *contextStore) Context() context.Context {
return cs.ctx
}
func (cs *contextStore) Get(ctx context.Context, c cid.Cid, out interface{}) error {
return cs.cst.Get(ctx, c, out)
}
func (cs *contextStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) {
return cs.cst.Put(ctx, v)
}
@ -362,7 +362,7 @@ func (cg *ChainGen) nextBlockProof(ctx context.Context, pts *types.TipSet, m add
rbase = entries[len(entries)-1]
}
eproof, err := IsRoundWinner(ctx, pts, round, m, rbase, mbi, mc)
eproof, err := IsRoundWinner(ctx, round, m, rbase, mbi, mc)
if err != nil {
return nil, nil, nil, xerrors.Errorf("checking round winner failed: %w", err)
}
@ -376,7 +376,7 @@ func (cg *ChainGen) nextBlockProof(ctx context.Context, pts *types.TipSet, m add
buf.Write(pts.MinTicket().VRFProof)
}
ticketRand, err := rand.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_TicketProduction, round-build.TicketRandomnessLookback, buf.Bytes())
ticketRand, err := rand.DrawRandomnessFromBase(rbase.Data, crypto.DomainSeparationTag_TicketProduction, round-build.TicketRandomnessLookback, buf.Bytes())
if err != nil {
return nil, nil, nil, err
}
@ -449,18 +449,19 @@ func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Ad
}
func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet, miners []address.Address, msgs [][]*types.SignedMessage, nulls abi.ChainEpoch) (*store.FullTipSet, error) {
ctx := context.TODO()
var blks []*types.FullBlock
for round := base.Height() + nulls + 1; len(blks) == 0; round++ {
for mi, m := range miners {
bvals, et, ticket, err := cg.nextBlockProof(context.TODO(), base, m, round)
bvals, et, ticket, err := cg.nextBlockProof(ctx, base, m, round)
if err != nil {
return nil, xerrors.Errorf("next block proof: %w", err)
}
if et != nil {
// TODO: maybe think about passing in more real parameters to this?
wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil, round, network.Version0)
wpost, err := cg.eppProvs[m].ComputeProof(ctx, nil, nil, round, network.Version0)
if err != nil {
return nil, err
}
@ -476,8 +477,18 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet,
}
fts := store.NewFullTipSet(blks)
if err := cg.cs.PutTipSet(context.TODO(), fts.TipSet()); err != nil {
return nil, err
if err := cg.cs.PersistTipsets(ctx, []*types.TipSet{fts.TipSet()}); err != nil {
return nil, xerrors.Errorf("failed to persist tipset: %w", err)
}
for _, blk := range blks {
if err := cg.cs.AddToTipSetTracker(ctx, blk.Header); err != nil {
return nil, xerrors.Errorf("failed to add to tipset tracker: %w", err)
}
}
if err := cg.cs.RefreshHeaviestTipSet(ctx, fts.TipSet().Height()); err != nil {
return nil, xerrors.Errorf("failed to put tipset: %w", err)
}
cg.CurTipset = fts
@ -628,7 +639,7 @@ func (wpp *wppProvider) ComputeProof(context.Context, []proof7.ExtendedSectorInf
return ValidWpostForTesting, nil
}
func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch,
func IsRoundWinner(ctx context.Context, round abi.ChainEpoch,
miner address.Address, brand types.BeaconEntry, mbi *api.MiningBaseInfo, a MiningCheckAPI) (*types.ElectionProof, error) {
buf := new(bytes.Buffer)
@ -636,7 +647,7 @@ func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch,
return nil, xerrors.Errorf("failed to cbor marshal address: %w", err)
}
electionRand, err := rand.DrawRandomness(brand.Data, crypto.DomainSeparationTag_ElectionProofProduction, round, buf.Bytes())
electionRand, err := rand.DrawRandomnessFromBase(brand.Data, crypto.DomainSeparationTag_ElectionProofProduction, round, buf.Bytes())
if err != nil {
return nil, xerrors.Errorf("failed to draw randomness: %w", err)
}
@ -43,6 +43,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/consensus"
lrand "github.com/filecoin-project/lotus/chain/rand"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -590,19 +591,21 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
return c, nil
}
var _ lrand.Rand = new(fakeRand)
// TODO: copied from actors test harness, deduplicate or remove from here
type fakeRand struct{}
func (fr *fakeRand) GetChainRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
func (fr *fakeRand) GetChainRandomness(ctx context.Context, randEpoch abi.ChainEpoch) ([32]byte, error) {
out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint
return out, nil
return *(*[32]byte)(out), nil
}
func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, randEpoch abi.ChainEpoch) ([32]byte, error) {
out := make([]byte, 32)
_, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint
return out, nil
return *(*[32]byte)(out), nil
}
func currentTotalPower(ctx context.Context, vm vm.Interface, maddr address.Address) (*power0.CurrentTotalPowerReturn, error) {
@ -26,20 +26,30 @@ func New(dstore ds.Batching) *SlashFilter {
}
}
func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, parentEpoch abi.ChainEpoch) (cid.Cid, error) {
func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, parentEpoch abi.ChainEpoch) (cid.Cid, bool, error) {
epochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, bh.Height))
{
// double-fork mining (2 blocks at one epoch)
if witness, err := checkFault(ctx, f.byEpoch, epochKey, bh, "double-fork mining faults"); err != nil {
return witness, xerrors.Errorf("check double-fork mining faults: %w", err)
doubleForkWitness, doubleForkFault, err := checkFault(ctx, f.byEpoch, epochKey, bh, "double-fork mining faults")
if err != nil {
return cid.Undef, false, xerrors.Errorf("check double-fork mining faults: %w", err)
}
if doubleForkFault {
return doubleForkWitness, doubleForkFault, nil
}
}
parentsKey := ds.NewKey(fmt.Sprintf("/%s/%x", bh.Miner, types.NewTipSetKey(bh.Parents...).Bytes()))
{
// time-offset mining faults (2 blocks with the same parents)
if witness, err := checkFault(ctx, f.byParents, parentsKey, bh, "time-offset mining faults"); err != nil {
return witness, xerrors.Errorf("check time-offset mining faults: %w", err)
timeOffsetWitness, timeOffsetFault, err := checkFault(ctx, f.byParents, parentsKey, bh, "time-offset mining faults")
if err != nil {
return cid.Undef, false, xerrors.Errorf("check time-offset mining faults: %w", err)
}
if timeOffsetFault {
return timeOffsetWitness, timeOffsetFault, nil
}
}
@ -50,19 +60,19 @@ func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, par
parentEpochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, parentEpoch))
have, err := f.byEpoch.Has(ctx, parentEpochKey)
if err != nil {
return cid.Undef, err
return cid.Undef, false, xerrors.Errorf("failed to read from db: %w", err)
}
if have {
// If we had, make sure it's in our parent tipset
cidb, err := f.byEpoch.Get(ctx, parentEpochKey)
if err != nil {
return cid.Undef, xerrors.Errorf("getting other block cid: %w", err)
return cid.Undef, false, xerrors.Errorf("getting other block cid: %w", err)
}
_, parent, err := cid.CidFromBytes(cidb)
if err != nil {
return cid.Undef, err
return cid.Undef, false, xerrors.Errorf("failed to read cid from bytes: %w", err)
}
var found bool
@ -73,45 +83,45 @@ func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, par
}
if !found {
return parent, xerrors.Errorf("produced block would trigger 'parent-grinding fault' consensus fault; miner: %s; bh: %s, expected parent: %s", bh.Miner, bh.Cid(), parent)
return parent, true, nil
}
}
}
if err := f.byParents.Put(ctx, parentsKey, bh.Cid().Bytes()); err != nil {
return cid.Undef, xerrors.Errorf("putting byEpoch entry: %w", err)
return cid.Undef, false, xerrors.Errorf("putting byEpoch entry: %w", err)
}
if err := f.byEpoch.Put(ctx, epochKey, bh.Cid().Bytes()); err != nil {
return cid.Undef, xerrors.Errorf("putting byEpoch entry: %w", err)
return cid.Undef, false, xerrors.Errorf("putting byEpoch entry: %w", err)
}
return cid.Undef, nil
return cid.Undef, false, nil
}
func checkFault(ctx context.Context, t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) (cid.Cid, error) {
func checkFault(ctx context.Context, t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) (cid.Cid, bool, error) {
fault, err := t.Has(ctx, key)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to read from datastore: %w", err)
return cid.Undef, false, xerrors.Errorf("failed to read from datastore: %w", err)
}
if fault {
cidb, err := t.Get(ctx, key)
if err != nil {
return cid.Undef, xerrors.Errorf("getting other block cid: %w", err)
return cid.Undef, false, xerrors.Errorf("getting other block cid: %w", err)
}
_, other, err := cid.CidFromBytes(cidb)
if err != nil {
return cid.Undef, err
return cid.Undef, false, xerrors.Errorf("failed to read cid of other block: %w", err)
}
if other == bh.Cid() {
return cid.Undef, nil
return cid.Undef, false, nil
}
return other, xerrors.Errorf("produced block would trigger '%s' consensus fault; miner: %s; bh: %s, other: %s", faultType, bh.Miner, bh.Cid(), other)
return other, true, nil
}
return cid.Undef, nil
return cid.Undef, false, nil
}
179 chain/gen/slashfilter/slashsvc/slashservice.go Normal file
@ -0,0 +1,179 @@
package slashsvc
import (
"context"
"time"
"github.com/ipfs/go-cid"
levelds "github.com/ipfs/go-ds-leveldb"
logging "github.com/ipfs/go-log/v2"
ldbopts "github.com/syndtr/goleveldb/leveldb/opt"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
cborutil "github.com/filecoin-project/go-cbor-util"
"github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
"github.com/filecoin-project/lotus/chain/types"
)
var log = logging.Logger("slashsvc")
type ConsensusSlasherApi interface {
ChainHead(context.Context) (*types.TipSet, error)
ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error)
MpoolPushMessage(ctx context.Context, msg *types.Message, spec *lapi.MessageSendSpec) (*types.SignedMessage, error)
SyncIncomingBlocks(context.Context) (<-chan *types.BlockHeader, error)
WalletDefaultAddress(context.Context) (address.Address, error)
}
func SlashConsensus(ctx context.Context, a ConsensusSlasherApi, p string, from string) error {
var fromAddr address.Address
ds, err := levelds.NewDatastore(p, &levelds.Options{
Compression: ldbopts.NoCompression,
NoSync: false,
Strict: ldbopts.StrictAll,
ReadOnly: false,
})
if err != nil {
return xerrors.Errorf("open leveldb: %w", err)
}
sf := slashfilter.New(ds)
if from == "" {
defaddr, err := a.WalletDefaultAddress(ctx)
if err != nil {
return err
}
fromAddr = defaddr
} else {
addr, err := address.NewFromString(from)
if err != nil {
return err
}
fromAddr = addr
}
blocks, err := a.SyncIncomingBlocks(ctx)
if err != nil {
return xerrors.Errorf("sync incoming blocks failed: %w", err)
}
log.Infow("consensus fault reporter", "from", fromAddr)
go func() {
for block := range blocks {
otherBlock, extraBlock, fault, err := slashFilterMinedBlock(ctx, sf, a, block)
if err != nil {
log.Errorf("slash detector errored: %s", err)
continue
}
if fault {
log.Errorf("<!!> SLASH FILTER DETECTED FAULT DUE TO BLOCKS %s and %s", otherBlock.Cid(), block.Cid())
bh1, err := cborutil.Dump(otherBlock)
if err != nil {
log.Errorf("could not dump otherblock:%s, err:%s", otherBlock.Cid(), err)
continue
}
bh2, err := cborutil.Dump(block)
if err != nil {
log.Errorf("could not dump block:%s, err:%s", block.Cid(), err)
continue
}
params := miner.ReportConsensusFaultParams{
BlockHeader1: bh1,
BlockHeader2: bh2,
}
if extraBlock != nil {
be, err := cborutil.Dump(extraBlock)
if err != nil {
log.Errorf("could not dump block:%s, err:%s", block.Cid(), err)
continue
}
params.BlockHeaderExtra = be
}
enc, err := actors.SerializeParams(&params)
if err != nil {
log.Errorf("could not serialize declare faults parameters: %s", err)
continue
}
for {
head, err := a.ChainHead(ctx)
if err != nil || head.Height() > block.Height {
break
}
time.Sleep(time.Second * 10)
}
message, err := a.MpoolPushMessage(ctx, &types.Message{
To: block.Miner,
From: fromAddr,
Value: types.NewInt(0),
Method: builtin.MethodsMiner.ReportConsensusFault,
Params: enc,
}, nil)
if err != nil {
log.Errorf("ReportConsensusFault to messagepool error:%s", err)
continue
}
log.Infof("ReportConsensusFault message CID:%s", message.Cid())
}
}
}()
return nil
}
func slashFilterMinedBlock(ctx context.Context, sf *slashfilter.SlashFilter, a ConsensusSlasherApi, blockB *types.BlockHeader) (*types.BlockHeader, *types.BlockHeader, bool, error) {
blockC, err := a.ChainGetBlock(ctx, blockB.Parents[0])
if err != nil {
return nil, nil, false, xerrors.Errorf("chain get block error:%s", err)
}
blockACid, fault, err := sf.MinedBlock(ctx, blockB, blockC.Height)
if err != nil {
return nil, nil, false, xerrors.Errorf("slash filter check block error:%s", err)
}
if !fault {
return nil, nil, false, nil
}
blockA, err := a.ChainGetBlock(ctx, blockACid)
if err != nil {
return nil, nil, false, xerrors.Errorf("failed to get blockA: %w", err)
}
// (a) double-fork mining (2 blocks at one epoch)
if blockA.Height == blockB.Height {
return blockA, nil, true, nil
}
// (b) time-offset mining faults (2 blocks with the same parents)
if types.CidArrsEqual(blockB.Parents, blockA.Parents) {
return blockA, nil, true, nil
}
// (c) parent-grinding fault
// Here extra is the "witness", a third block that shows the connection between A and B as
// A's sibling and B's parent.
// Specifically, since A is of lower height, it must be that B was mined omitting A from its tipset
//
// B
// |
// [A, C]
if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height &&
types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) {
return blockA, blockC, true, nil
}
log.Error("unexpectedly reached end of slashFilterMinedBlock despite fault being reported!")
return nil, nil, false, nil
}
@ -39,23 +39,6 @@ func (ps *Store) save(ctx context.Context, state *FundedAddressState) error {
return ps.ds.Put(ctx, k, b)
}
// get the state for the given address
func (ps *Store) get(ctx context.Context, addr address.Address) (*FundedAddressState, error) {
k := dskeyForAddr(addr)
data, err := ps.ds.Get(ctx, k)
if err != nil {
return nil, err
}
var state FundedAddressState
err = cborrpc.ReadCborRPC(bytes.NewReader(data), &state)
if err != nil {
return nil, err
}
return &state, nil
}
// forEach calls iter with each address in the datastore
func (ps *Store) forEach(ctx context.Context, iter func(*FundedAddressState)) error {
res, err := ps.ds.Query(ctx, dsq.Query{Prefix: dsKeyAddr})
@ -63,6 +63,9 @@ var MaxNonceGap = uint64(4)
const MaxMessageSize = 64 << 10 // 64KiB
// NOTE: When adding a new error type, please make sure to add the new error type in
// func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message)
// in /chain/sub/incoming.go
var (
ErrMessageTooBig = errors.New("message too big")
@ -17,18 +17,20 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/beacon"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/vm"
|
||||
)
|
||||
|
||||
var log = logging.Logger("rand")
|
||||
|
||||
func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
||||
func DrawRandomnessFromBase(rbase []byte, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
||||
return DrawRandomnessFromDigest(blake2b.Sum256(rbase), pers, round, entropy)
|
||||
}
|
||||
|
||||
func DrawRandomnessFromDigest(digest [32]byte, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
||||
h := blake2b.New256()
|
||||
if err := binary.Write(h, binary.BigEndian, int64(pers)); err != nil {
|
||||
return nil, xerrors.Errorf("deriving randomness: %w", err)
|
||||
}
|
||||
VRFDigest := blake2b.Sum256(rbase)
|
||||
_, err := h.Write(VRFDigest[:])
|
||||
_, err := h.Write(digest[:])
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("hashing VRFDigest: %w", err)
|
||||
}
|
||||
@ -70,18 +72,18 @@ func (sr *stateRand) GetBeaconRandomnessTipset(ctx context.Context, round abi.Ch
|
||||
return randTs, nil
|
||||
}
|
||||
|
||||
func (sr *stateRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) {
|
||||
func (sr *stateRand) getChainRandomness(ctx context.Context, round abi.ChainEpoch, lookback bool) ([32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "store.GetChainRandomness")
|
||||
defer span.End()
|
||||
span.AddAttributes(trace.Int64Attribute("round", int64(round)))
|
||||
|
||||
ts, err := sr.cs.LoadTipSet(ctx, types.NewTipSetKey(sr.blks...))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
if round > ts.Height() {
|
||||
return nil, xerrors.Errorf("cannot draw randomness from the future")
|
||||
return [32]byte{}, xerrors.Errorf("cannot draw randomness from the future")
|
||||
}
|
||||
|
||||
searchHeight := round
|
||||
@ -91,14 +93,10 @@ func (sr *stateRand) getChainRandomness(ctx context.Context, pers crypto.DomainS
|
||||
|
||||
randTs, err := sr.cs.GetTipsetByHeight(ctx, searchHeight, ts, lookback)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
mtb := randTs.MinTicketBlock()
|
||||
|
||||
// if at (or just past -- for null epochs) appropriate epoch
|
||||
// or at genesis (works for negative epochs)
|
||||
return DrawRandomness(mtb.Ticket.VRFProof, pers, round, entropy)
|
||||
return blake2b.Sum256(randTs.MinTicketBlock().Ticket.VRFProof), nil
|
||||
}
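getChainRandomness now returns only the 32-byte digest of the minimum ticket's VRF proof; the domain-separation tag, round and entropy are mixed in later by DrawRandomnessFromDigest. A rough standalone sketch of that final mixing step follows, assuming golang.org/x/crypto/blake2b for the hash (the import actually used by the rand package may differ) and plain int64 values in place of crypto.DomainSeparationTag and abi.ChainEpoch.

package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/blake2b"
)

// drawFromDigest sketches DrawRandomnessFromDigest: the personalization tag,
// the pre-hashed base digest, the round number and the entropy are hashed
// together into the final randomness value.
func drawFromDigest(digest [32]byte, pers int64, round int64, entropy []byte) ([]byte, error) {
	h, err := blake2b.New256(nil)
	if err != nil {
		return nil, err
	}
	if err := binary.Write(h, binary.BigEndian, pers); err != nil {
		return nil, err
	}
	if _, err := h.Write(digest[:]); err != nil {
		return nil, err
	}
	if err := binary.Write(h, binary.BigEndian, round); err != nil {
		return nil, err
	}
	if _, err := h.Write(entropy); err != nil {
		return nil, err
	}
	return h.Sum(nil), nil
}

// drawFromBase is the convenience wrapper: hash the raw base first, then draw.
func drawFromBase(base []byte, pers int64, round int64, entropy []byte) ([]byte, error) {
	return drawFromDigest(blake2b.Sum256(base), pers, round, entropy)
}

func main() {
	out, _ := drawFromBase([]byte("vrf-proof-bytes"), 1, 100, []byte("miner-addr"))
	fmt.Printf("%x\n", out)
}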
|
||||
|
||||
type NetworkVersionGetter func(context.Context, abi.ChainEpoch) network.Version
|
||||
@ -110,7 +108,12 @@ type stateRand struct {
|
||||
networkVersionGetter NetworkVersionGetter
|
||||
}
|
||||
|
||||
func NewStateRand(cs *store.ChainStore, blks []cid.Cid, b beacon.Schedule, networkVersionGetter NetworkVersionGetter) vm.Rand {
|
||||
type Rand interface {
|
||||
GetChainRandomness(ctx context.Context, round abi.ChainEpoch) ([32]byte, error)
|
||||
GetBeaconRandomness(ctx context.Context, round abi.ChainEpoch) ([32]byte, error)
|
||||
}
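Because NewStateRand now returns this narrow Rand interface instead of vm.Rand, callers that only need digests can be handed a lightweight test double. A minimal sketch, written as if it sat next to the interface above and reusing the package's existing context and abi imports; fixedRand is illustrative, not an existing Lotus type.

// fixedRand returns the same digest for every epoch; handy in unit tests that
// exercise code paths depending only on the Rand interface above.
type fixedRand struct{}

func (fixedRand) GetChainRandomness(_ context.Context, _ abi.ChainEpoch) ([32]byte, error) {
	return [32]byte{0x01}, nil
}

func (fixedRand) GetBeaconRandomness(_ context.Context, _ abi.ChainEpoch) ([32]byte, error) {
	return [32]byte{0x02}, nil
}

var _ Rand = fixedRand{} // compile-time check against the interface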
|
||||
|
||||
func NewStateRand(cs *store.ChainStore, blks []cid.Cid, b beacon.Schedule, networkVersionGetter NetworkVersionGetter) Rand {
|
||||
return &stateRand{
|
||||
cs: cs,
|
||||
blks: blks,
|
||||
@ -120,76 +123,102 @@ func NewStateRand(cs *store.ChainStore, blks []cid.Cid, b beacon.Schedule, netwo
|
||||
}
|
||||
|
||||
// network v0-12
|
||||
func (sr *stateRand) getBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
||||
func (sr *stateRand) getBeaconRandomnessV1(ctx context.Context, round abi.ChainEpoch) ([32]byte, error) {
|
||||
randTs, err := sr.GetBeaconRandomnessTipset(ctx, round, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
be, err := sr.cs.GetLatestBeaconEntry(ctx, randTs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
// if at (or just past -- for null epochs) appropriate epoch
|
||||
// or at genesis (works for negative epochs)
|
||||
return DrawRandomness(be.Data, pers, round, entropy)
|
||||
return blake2b.Sum256(be.Data), nil
|
||||
}
|
||||
|
||||
// network v13
|
||||
func (sr *stateRand) getBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
||||
func (sr *stateRand) getBeaconRandomnessV2(ctx context.Context, round abi.ChainEpoch) ([32]byte, error) {
|
||||
randTs, err := sr.GetBeaconRandomnessTipset(ctx, round, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
be, err := sr.cs.GetLatestBeaconEntry(ctx, randTs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
// if at (or just past -- for null epochs) appropriate epoch
|
||||
// or at genesis (works for negative epochs)
|
||||
return DrawRandomness(be.Data, pers, round, entropy)
|
||||
return blake2b.Sum256(be.Data), nil
|
||||
}
|
||||
|
||||
// network v14 and on
|
||||
func (sr *stateRand) getBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
||||
func (sr *stateRand) getBeaconRandomnessV3(ctx context.Context, filecoinEpoch abi.ChainEpoch) ([32]byte, error) {
|
||||
if filecoinEpoch < 0 {
|
||||
return sr.getBeaconRandomnessV2(ctx, pers, filecoinEpoch, entropy)
|
||||
return sr.getBeaconRandomnessV2(ctx, filecoinEpoch)
|
||||
}
|
||||
|
||||
be, err := sr.extractBeaconEntryForEpoch(ctx, filecoinEpoch)
|
||||
if err != nil {
|
||||
log.Errorf("failed to get beacon entry as expected: %s", err)
|
||||
return nil, err
|
||||
return [32]byte{}, err
|
||||
}
|
||||
|
||||
return DrawRandomness(be.Data, pers, filecoinEpoch, entropy)
|
||||
return blake2b.Sum256(be.Data), nil
|
||||
}
|
||||
|
||||
func (sr *stateRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
||||
func (sr *stateRand) GetChainRandomness(ctx context.Context, filecoinEpoch abi.ChainEpoch) ([32]byte, error) {
|
||||
nv := sr.networkVersionGetter(ctx, filecoinEpoch)
|
||||
|
||||
if nv >= network.Version13 {
|
||||
return sr.getChainRandomness(ctx, pers, filecoinEpoch, entropy, false)
|
||||
return sr.getChainRandomness(ctx, filecoinEpoch, false)
|
||||
}
|
||||
|
||||
return sr.getChainRandomness(ctx, pers, filecoinEpoch, entropy, true)
|
||||
return sr.getChainRandomness(ctx, filecoinEpoch, true)
|
||||
}
|
||||
|
||||
func (sr *stateRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
||||
func (sr *stateRand) GetBeaconRandomness(ctx context.Context, filecoinEpoch abi.ChainEpoch) ([32]byte, error) {
|
||||
nv := sr.networkVersionGetter(ctx, filecoinEpoch)
|
||||
|
||||
if nv >= network.Version14 {
|
||||
return sr.getBeaconRandomnessV3(ctx, pers, filecoinEpoch, entropy)
|
||||
return sr.getBeaconRandomnessV3(ctx, filecoinEpoch)
|
||||
} else if nv == network.Version13 {
|
||||
return sr.getBeaconRandomnessV2(ctx, pers, filecoinEpoch, entropy)
|
||||
return sr.getBeaconRandomnessV2(ctx, filecoinEpoch)
|
||||
} else {
|
||||
return sr.getBeaconRandomnessV1(ctx, pers, filecoinEpoch, entropy)
|
||||
return sr.getBeaconRandomnessV1(ctx, filecoinEpoch)
|
||||
}
|
||||
}
|
||||
|
||||
func (sr *stateRand) DrawChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
||||
digest, err := sr.GetChainRandomness(ctx, filecoinEpoch)
|
||||
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to get chain randomness: %w", err)
|
||||
}
|
||||
|
||||
ret, err := DrawRandomnessFromDigest(digest, pers, filecoinEpoch, entropy)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to draw chain randomness: %w", err)
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (sr *stateRand) DrawBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {
|
||||
digest, err := sr.GetBeaconRandomness(ctx, filecoinEpoch)
|
||||
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to get beacon randomness: %w", err)
|
||||
}
|
||||
|
||||
ret, err := DrawRandomnessFromDigest(digest, pers, filecoinEpoch, entropy)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to draw beacon randomness: %w", err)
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
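DrawChainRandomness and DrawBeaconRandomness preserve the old single-call behaviour by composing the two new steps. A caller that needs several domain-separated values for one epoch can instead fetch the digest once and draw from it repeatedly. The sketch below assumes the surrounding package's imports; drawTwo is illustrative and the two tags are arbitrary examples.

// drawTwo shows how one beacon digest can be reused for two domain-separated
// values: a single beacon/chain lookup, then two cheap local draws.
func drawTwo(ctx context.Context, sr Rand, epoch abi.ChainEpoch, entropy []byte) ([]byte, []byte, error) {
	digest, err := sr.GetBeaconRandomness(ctx, epoch)
	if err != nil {
		return nil, nil, err
	}
	a, err := DrawRandomnessFromDigest(digest, crypto.DomainSeparationTag_SealRandomness, epoch, entropy)
	if err != nil {
		return nil, nil, err
	}
	b, err := DrawRandomnessFromDigest(digest, crypto.DomainSeparationTag_TicketProduction, epoch, entropy)
	if err != nil {
		return nil, nil, err
	}
	return a, b, nil
}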
|
||||
|
||||
func (sr *stateRand) extractBeaconEntryForEpoch(ctx context.Context, filecoinEpoch abi.ChainEpoch) (*types.BeaconEntry, error) {
|
||||
randTs, err := sr.GetBeaconRandomnessTipset(ctx, filecoinEpoch, false)
|
||||
if err != nil {
|
||||
|
@ -69,7 +69,7 @@ func TestNullRandomnessV1(t *testing.T) {
|
||||
}
|
||||
|
||||
//stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01
|
||||
rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
|
||||
rand2, err := rand.DrawRandomnessFromBase(resp.Entry.Data, pers, randEpoch, entropy)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -148,8 +148,8 @@ func TestNullRandomnessV2(t *testing.T) {
|
||||
}
|
||||
|
||||
//stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01, @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_TIPSET_03
|
||||
// note that the randEpoch passed to DrawRandomness is still randEpoch (not the latest ts height)
|
||||
rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
|
||||
// note that the randEpoch passed to DrawRandomnessFromBase is still randEpoch (not the latest ts height)
|
||||
rand2, err := rand.DrawRandomnessFromBase(resp.Entry.Data, pers, randEpoch, entropy)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -232,7 +232,7 @@ func TestNullRandomnessV3(t *testing.T) {
|
||||
}
|
||||
|
||||
//stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01
|
||||
rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy)
|
||||
rand2, err := rand.DrawRandomnessFromBase(resp.Entry.Data, pers, randEpoch, entropy)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -156,7 +156,7 @@ func VersionForNetwork(ver network.Version) (types.StateTreeVersion, error) {
|
||||
case network.Version13, network.Version14, network.Version15, network.Version16, network.Version17:
|
||||
return types.StateTreeVersion4, nil
|
||||
|
||||
case network.Version18, network.Version19, network.Version20:
|
||||
case network.Version18, network.Version19, network.Version20, network.Version21:
|
||||
return types.StateTreeVersion5, nil
|
||||
|
||||
default:
|
||||
|
@ -355,7 +355,7 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule
|
||||
return nil, xerrors.Errorf("failed to marshal miner address: %w", err)
|
||||
}
|
||||
|
||||
prand, err := rand.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, round, buf.Bytes())
|
||||
prand, err := rand.DrawRandomnessFromBase(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, round, buf.Bytes())
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to get randomness for winning post: %w", err)
|
||||
}
|
||||
|
@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
@ -11,6 +12,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-datastore"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
@ -177,11 +179,15 @@ func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, heig
|
||||
u := sm.stateMigrations[height]
|
||||
if u != nil && u.upgrade != nil {
|
||||
migCid, ok, err := u.migrationResultCache.Get(ctx, root)
|
||||
if err == nil && ok {
|
||||
log.Infow("CACHED migration", "height", height, "from", root, "to", migCid)
|
||||
return migCid, nil
|
||||
} else if err != nil {
|
||||
if err == nil {
|
||||
if ok {
|
||||
log.Infow("CACHED migration", "height", height, "from", root, "to", migCid)
|
||||
return migCid, nil
|
||||
}
|
||||
} else if !errors.Is(err, datastore.ErrNotFound) {
|
||||
log.Errorw("failed to lookup previous migration result", "err", err)
|
||||
} else {
|
||||
log.Debug("no cached migration found, migrating from scratch")
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
@ -226,11 +232,6 @@ func (sm *StateManager) hasExpensiveForkBetween(parent, height abi.ChainEpoch) b
|
||||
return false
|
||||
}
|
||||
|
||||
func (sm *StateManager) hasExpensiveFork(height abi.ChainEpoch) bool {
|
||||
_, ok := sm.expensiveUpgrades[height]
|
||||
return ok
|
||||
}
|
||||
|
||||
func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv16.MemMigrationCache, ts *types.TipSet) {
|
||||
height := ts.Height()
|
||||
parent := ts.ParentState()
|
||||
|
@ -7,7 +7,7 @@ import (
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru/v2"
|
||||
"github.com/hashicorp/golang-lru/arc/v2"
|
||||
"github.com/ipfs/go-cid"
|
||||
dstore "github.com/ipfs/go-datastore"
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
@ -156,7 +156,7 @@ type StateManager struct {
|
||||
|
||||
// We keep a small cache for calls to ExecutionTrace which helps improve
|
||||
// performance for node operators like exchanges and block explorers
|
||||
execTraceCache *lru.ARCCache[types.TipSetKey, tipSetCacheEntry]
|
||||
execTraceCache *arc.ARCCache[types.TipSetKey, tipSetCacheEntry]
|
||||
// We need a lock while making the copy as to prevent other callers
|
||||
// overwrite the cache while making the copy
|
||||
execTraceCacheLock sync.Mutex
|
||||
@ -213,10 +213,10 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
|
||||
}
|
||||
|
||||
log.Debugf("execTraceCache size: %d", execTraceCacheSize)
|
||||
var execTraceCache *lru.ARCCache[types.TipSetKey, tipSetCacheEntry]
|
||||
var execTraceCache *arc.ARCCache[types.TipSetKey, tipSetCacheEntry]
|
||||
var err error
|
||||
if execTraceCacheSize > 0 {
|
||||
execTraceCache, err = lru.NewARC[types.TipSetKey, tipSetCacheEntry](execTraceCacheSize)
|
||||
execTraceCache, err = arc.NewARC[types.TipSetKey, tipSetCacheEntry](execTraceCacheSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -509,7 +509,17 @@ func (sm *StateManager) GetRandomnessFromBeacon(ctx context.Context, personaliza
|
||||
|
||||
r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon, sm.GetNetworkVersion)
|
||||
|
||||
return r.GetBeaconRandomness(ctx, personalization, randEpoch, entropy)
|
||||
digest, err := r.GetBeaconRandomness(ctx, randEpoch)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting beacon randomness: %w", err)
|
||||
}
|
||||
|
||||
ret, err := rand.DrawRandomnessFromDigest(digest, personalization, randEpoch, entropy)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("drawing beacon randomness: %w", err)
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
|
||||
}
|
||||
|
||||
@ -521,5 +531,38 @@ func (sm *StateManager) GetRandomnessFromTickets(ctx context.Context, personaliz
|
||||
|
||||
r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon, sm.GetNetworkVersion)
|
||||
|
||||
return r.GetChainRandomness(ctx, personalization, randEpoch, entropy)
|
||||
digest, err := r.GetChainRandomness(ctx, randEpoch)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("getting chain randomness: %w", err)
|
||||
}
|
||||
|
||||
ret, err := rand.DrawRandomnessFromDigest(digest, personalization, randEpoch, entropy)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("drawing chain randomness: %w", err)
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (sm *StateManager) GetRandomnessDigestFromBeacon(ctx context.Context, randEpoch abi.ChainEpoch, tsk types.TipSetKey) ([32]byte, error) {
|
||||
pts, err := sm.ChainStore().GetTipSetFromKey(ctx, tsk)
|
||||
if err != nil {
|
||||
return [32]byte{}, xerrors.Errorf("loading tipset %s: %w", tsk, err)
|
||||
}
|
||||
|
||||
r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon, sm.GetNetworkVersion)
|
||||
|
||||
return r.GetBeaconRandomness(ctx, randEpoch)
|
||||
|
||||
}
|
||||
|
||||
func (sm *StateManager) GetRandomnessDigestFromTickets(ctx context.Context, randEpoch abi.ChainEpoch, tsk types.TipSetKey) ([32]byte, error) {
|
||||
pts, err := sm.ChainStore().LoadTipSet(ctx, tsk)
|
||||
if err != nil {
|
||||
return [32]byte{}, xerrors.Errorf("loading tipset key: %w", err)
|
||||
}
|
||||
|
||||
r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon, sm.GetNetworkVersion)
|
||||
|
||||
return r.GetChainRandomness(ctx, randEpoch)
|
||||
}

|
||||
|
@ -388,6 +388,14 @@ func (sm *StateManager) GetCirculatingSupply(ctx context.Context, height abi.Cha
|
||||
circ := big.Zero()
|
||||
unCirc := big.Zero()
|
||||
err := st.ForEach(func(a address.Address, actor *types.Actor) error {
|
||||
// this can be a lengthy operation, we need to cancel early when
|
||||
// the context is cancelled to avoid resource exhaustion
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// this will cause ForEach to return
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
switch {
|
||||
case actor.Balance.IsZero():
|
||||
// Do nothing for zero-balance actors
|
||||
|
@ -70,7 +70,7 @@ func TestChainCheckpoint(t *testing.T) {
|
||||
}
|
||||
|
||||
// See if the chain will take the fork, it shouldn't.
|
||||
err = cs.MaybeTakeHeavierTipSet(context.Background(), last)
|
||||
err = cs.RefreshHeaviestTipSet(context.Background(), last.Height())
|
||||
require.NoError(t, err)
|
||||
head = cs.GetHeaviestTipSet()
|
||||
require.True(t, head.Equals(checkpoint))
|
||||
@ -80,7 +80,7 @@ func TestChainCheckpoint(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Now switch to the other fork.
|
||||
err = cs.MaybeTakeHeavierTipSet(context.Background(), last)
|
||||
err = cs.RefreshHeaviestTipSet(context.Background(), last.Height())
|
||||
require.NoError(t, err)
|
||||
head = cs.GetHeaviestTipSet()
|
||||
require.True(t, head.Equals(last))
|
||||
|
@ -16,6 +16,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/consensus/filcns"
|
||||
"github.com/filecoin-project/lotus/chain/gen"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/types/mock"
|
||||
)
|
||||
|
||||
@ -47,28 +48,29 @@ func TestIndexSeeks(t *testing.T) {
|
||||
}
|
||||
|
||||
cur := mock.TipSet(gen)
|
||||
if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.NoError(t, cs.SetGenesis(ctx, gen))
|
||||
|
||||
// Put 113 blocks from genesis
|
||||
for i := 0; i < 113; i++ {
|
||||
nextts := mock.TipSet(mock.MkBlock(cur, 1, 1))
|
||||
|
||||
if err := cs.PutTipSet(ctx, nextts); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
nextBlk := mock.MkBlock(cur, 1, 1)
|
||||
nextts := mock.TipSet(nextBlk)
|
||||
assert.NoError(t, cs.PersistTipsets(ctx, []*types.TipSet{nextts}))
|
||||
assert.NoError(t, cs.AddToTipSetTracker(ctx, nextBlk))
|
||||
cur = nextts
|
||||
}
|
||||
|
||||
assert.NoError(t, cs.RefreshHeaviestTipSet(ctx, cur.Height()))
|
||||
|
||||
// Put 50 null epochs + 1 block
|
||||
skip := mock.MkBlock(cur, 1, 1)
|
||||
skip.Height += 50
|
||||
|
||||
skipts := mock.TipSet(skip)
|
||||
|
||||
if err := cs.PutTipSet(ctx, skipts); err != nil {
|
||||
assert.NoError(t, cs.PersistTipsets(ctx, []*types.TipSet{skipts}))
|
||||
assert.NoError(t, cs.AddToTipSetTracker(ctx, skip))
|
||||
|
||||
if err := cs.RefreshHeaviestTipSet(ctx, skip.Height); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -15,7 +15,7 @@ import (
|
||||
"github.com/ipld/go-car"
|
||||
carutil "github.com/ipld/go-car/util"
|
||||
carv2 "github.com/ipld/go-car/v2"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
"github.com/multiformats/go-multicodec"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"go.uber.org/atomic"
|
||||
"golang.org/x/sync/errgroup"
|
||||
@ -369,14 +369,16 @@ func (s *walkScheduler) Wait() error {
|
||||
}
|
||||
|
||||
func (s *walkScheduler) enqueueIfNew(task walkTask) {
|
||||
if task.c.Prefix().MhType == mh.IDENTITY {
|
||||
if multicodec.Code(task.c.Prefix().MhType) == multicodec.Identity {
|
||||
//log.Infow("ignored", "cid", todo.c.String())
|
||||
return
|
||||
}
|
||||
|
||||
// This lets through RAW and CBOR blocks, the only two types that we
|
||||
// end up writing to the exported CAR.
|
||||
if task.c.Prefix().Codec != cid.Raw && task.c.Prefix().Codec != cid.DagCBOR {
|
||||
// This lets through RAW, CBOR, and DagCBOR blocks, the only types that we end up writing to
|
||||
// the exported CAR.
|
||||
switch multicodec.Code(task.c.Prefix().Codec) {
|
||||
case multicodec.Cbor, multicodec.DagCbor, multicodec.Raw:
|
||||
default:
|
||||
//log.Infow("ignored", "cid", todo.c.String())
|
||||
return
|
||||
}
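enqueueIfNew now goes through the multicodec package for both the multihash type and the codec, rather than comparing against mh.IDENTITY and the cid constants. A small self-contained example of the same filter, using only go-cid and go-multicodec; the sample CID is the dag-pb CID that also appears in the indexer test later in this diff.

package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
	"github.com/multiformats/go-multicodec"
)

// exportable reports whether a block with this CID would be considered by the
// walker above: identity-hashed CIDs are skipped, and only Raw, Cbor and
// DagCbor payloads are written to the exported CAR.
func exportable(c cid.Cid) bool {
	p := c.Prefix()
	if multicodec.Code(p.MhType) == multicodec.Identity {
		return false
	}
	switch multicodec.Code(p.Codec) {
	case multicodec.Raw, multicodec.Cbor, multicodec.DagCbor:
		return true
	default:
		return false
	}
}

func main() {
	c, err := cid.Decode("QmbpDgg5kRLDgMxS8vPKNFXEcA6D5MC4CkuUdSWDVtHPGK") // dag-pb, so not exportable
	if err != nil {
		panic(err)
	}
	fmt.Println(exportable(c)) // false
}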
|
||||
@ -450,7 +452,8 @@ func (s *walkScheduler) processTask(t walkTask, workerN int) error {
|
||||
// We exported the ipld block. If it wasn't a CBOR block, there's nothing
|
||||
// else to do and we can bail out early as it won't have any links
|
||||
// etc.
|
||||
if t.c.Prefix().Codec != cid.DagCBOR || t.c.Prefix().MhType == mh.IDENTITY {
|
||||
if multicodec.Code(t.c.Prefix().Codec) != multicodec.DagCbor ||
|
||||
multicodec.Code(t.c.Prefix().MhType) == multicodec.Identity {
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -683,14 +686,13 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe
|
||||
prefix := c.Prefix()
|
||||
|
||||
// Don't include identity CIDs.
|
||||
if prefix.MhType == mh.IDENTITY {
|
||||
if multicodec.Code(prefix.MhType) == multicodec.Identity {
|
||||
continue
|
||||
}
|
||||
|
||||
// We only include raw and dagcbor, for now.
|
||||
// Raw for "code" CIDs.
|
||||
switch prefix.Codec {
|
||||
case cid.Raw, cid.DagCBOR:
|
||||
// We only include raw, cbor, and dagcbor, for now.
|
||||
switch multicodec.Code(prefix.Codec) {
|
||||
case multicodec.Cbor, multicodec.DagCbor, multicodec.Raw:
|
||||
default:
|
||||
continue
|
||||
}
|
||||
@ -722,7 +724,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe
|
||||
}
|
||||
|
||||
func recurseLinks(ctx context.Context, bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) {
|
||||
if root.Prefix().Codec != cid.DagCBOR {
|
||||
if multicodec.Code(root.Prefix().Codec) != multicodec.DagCbor {
|
||||
return in, nil
|
||||
}
|
||||
|
||||
|
@ -11,7 +11,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
lru "github.com/hashicorp/golang-lru/v2"
|
||||
"github.com/hashicorp/golang-lru/arc/v2"
|
||||
block "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
dstore "github.com/ipfs/go-datastore"
|
||||
@ -120,8 +120,8 @@ type ChainStore struct {
|
||||
reorgCh chan<- reorg
|
||||
reorgNotifeeCh chan ReorgNotifee
|
||||
|
||||
mmCache *lru.ARCCache[cid.Cid, mmCids]
|
||||
tsCache *lru.ARCCache[types.TipSetKey, *types.TipSet]
|
||||
mmCache *arc.ARCCache[cid.Cid, mmCids]
|
||||
tsCache *arc.ARCCache[types.TipSetKey, *types.TipSet]
|
||||
|
||||
evtTypes [1]journal.EventType
|
||||
journal journal.Journal
|
||||
@ -133,8 +133,8 @@ type ChainStore struct {
|
||||
}
|
||||
|
||||
func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, weight WeightFunc, j journal.Journal) *ChainStore {
|
||||
c, _ := lru.NewARC[cid.Cid, mmCids](DefaultMsgMetaCacheSize)
|
||||
tsc, _ := lru.NewARC[types.TipSetKey, *types.TipSet](DefaultTipSetCacheSize)
|
||||
c, _ := arc.NewARC[cid.Cid, mmCids](DefaultMsgMetaCacheSize)
|
||||
tsc, _ := arc.NewARC[types.TipSetKey, *types.TipSet](DefaultTipSetCacheSize)
|
||||
if j == nil {
|
||||
j = journal.NilJournal()
|
||||
}
|
||||
@ -367,49 +367,32 @@ func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid)
|
||||
func (cs *ChainStore) SetGenesis(ctx context.Context, b *types.BlockHeader) error {
|
||||
ts, err := types.NewTipSet([]*types.BlockHeader{b})
|
||||
if err != nil {
|
||||
return err
|
||||
return xerrors.Errorf("failed to construct genesis tipset: %w", err)
|
||||
}
|
||||
|
||||
if err := cs.PutTipSet(ctx, ts); err != nil {
|
||||
return err
|
||||
if err := cs.PersistTipsets(ctx, []*types.TipSet{ts}); err != nil {
|
||||
return xerrors.Errorf("failed to persist genesis tipset: %w", err)
|
||||
}
|
||||
|
||||
if err := cs.AddToTipSetTracker(ctx, b); err != nil {
|
||||
return xerrors.Errorf("failed to add genesis tipset to tracker: %w", err)
|
||||
}
|
||||
|
||||
if err := cs.RefreshHeaviestTipSet(ctx, ts.Height()); err != nil {
|
||||
return xerrors.Errorf("failed to put genesis tipset: %w", err)
|
||||
}
|
||||
|
||||
return cs.metadataDs.Put(ctx, dstore.NewKey("0"), b.Cid().Bytes())
|
||||
}
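SetGenesis now spells out the three steps that the removed PutTipSet used to bundle: persist the tipset, register its blocks with the tipset tracker, then refresh the heaviest tipset at that height. The updated chain store tests later in this diff follow the same sequence. A sketch of that idiom as a helper, assuming the surrounding package's imports and the ChainStore methods shown in this diff; putAndRefresh itself is illustrative, not a Lotus function.

// putAndRefresh sketches the sequence that replaces the removed PutTipSet:
// persist the headers, register each block in the tipset tracker, then ask the
// store to re-evaluate its head at that height.
func putAndRefresh(ctx context.Context, cs *ChainStore, ts *types.TipSet) error {
	if err := cs.PersistTipsets(ctx, []*types.TipSet{ts}); err != nil {
		return xerrors.Errorf("persisting tipset: %w", err)
	}
	for _, b := range ts.Blocks() {
		if err := cs.AddToTipSetTracker(ctx, b); err != nil {
			return xerrors.Errorf("tracking block %s: %w", b.Cid(), err)
		}
	}
	return cs.RefreshHeaviestTipSet(ctx, ts.Height())
}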
|
||||
|
||||
func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error {
|
||||
if err := cs.PersistTipsets(ctx, []*types.TipSet{ts}); err != nil {
|
||||
return xerrors.Errorf("failed to persist tipset: %w", err)
|
||||
}
|
||||
|
||||
expanded, err := cs.expandTipset(ctx, ts.Blocks()[0])
|
||||
if err != nil {
|
||||
return xerrors.Errorf("errored while expanding tipset: %w", err)
|
||||
}
|
||||
|
||||
if expanded.Key() != ts.Key() {
|
||||
log.Debugf("expanded %s into %s\n", ts.Cids(), expanded.Cids())
|
||||
|
||||
tsBlk, err := expanded.Key().ToStorageBlock()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get tipset key block: %w", err)
|
||||
}
|
||||
|
||||
if err = cs.chainLocalBlockstore.Put(ctx, tsBlk); err != nil {
|
||||
return xerrors.Errorf("failed to put tipset key block: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := cs.MaybeTakeHeavierTipSet(ctx, expanded); err != nil {
|
||||
return xerrors.Errorf("MaybeTakeHeavierTipSet failed in PutTipSet: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our
|
||||
// internal state as our new head, if and only if it is heavier than the current
|
||||
// head and does not exceed the maximum fork length.
|
||||
func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error {
|
||||
// RefreshHeaviestTipSet receives a newTsHeight at which a new tipset might exist. It then:
|
||||
// - "refreshes" the heaviest tipset that can be formed at its current heaviest height
|
||||
// - if equivocation is detected among the miners of the current heaviest tipset, the head is immediately updated to the heaviest tipset that can be formed in a range of 5 epochs
|
||||
//
|
||||
// - forms the best tipset that can be formed at the _input_ height
|
||||
// - compares the three tipset weights: "current" heaviest tipset, "refreshed" tipset, and best tipset at newTsHeight
|
||||
// - updates "current" heaviest to the heaviest of those 3 tipsets (if an update is needed), assuming it doesn't violate the maximum fork rule
|
||||
func (cs *ChainStore) RefreshHeaviestTipSet(ctx context.Context, newTsHeight abi.ChainEpoch) error {
|
||||
for {
|
||||
cs.heaviestLk.Lock()
|
||||
if len(cs.reorgCh) < reorgChBuf/2 {
|
||||
@ -426,39 +409,90 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS
|
||||
|
||||
defer cs.heaviestLk.Unlock()
|
||||
|
||||
if ts.Equals(cs.heaviest) {
|
||||
heaviestWeight, err := cs.weight(ctx, cs.StateBlockstore(), cs.heaviest)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to calculate currentHeaviest's weight: %w", err)
|
||||
}
|
||||
|
||||
heaviestHeight := abi.ChainEpoch(0)
|
||||
if cs.heaviest != nil {
|
||||
heaviestHeight = cs.heaviest.Height()
|
||||
}
|
||||
|
||||
// Before we look at newTs, let's refresh best tipset at current head's height -- this is done to detect equivocation
|
||||
newHeaviest, newHeaviestWeight, err := cs.FormHeaviestTipSetForHeight(ctx, heaviestHeight)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to reform head at same height: %w", err)
|
||||
}
|
||||
|
||||
// Equivocation has occurred! We need a new head NOW!
|
||||
if newHeaviest == nil || newHeaviestWeight.LessThan(heaviestWeight) {
|
||||
log.Warnf("chainstore heaviest tipset's weight SHRANK from %d (%s) to %d (%s) due to equivocation", heaviestWeight, cs.heaviest, newHeaviestWeight, newHeaviest)
|
||||
// Unfortunately, we don't know what the right height to form a new heaviest tipset is.
|
||||
// It is _probably_, but not _necessarily_, heaviestHeight.
|
||||
// So, we need to explore a range of epochs, finding the heaviest tipset in that range.
|
||||
// We thus try to form the heaviest tipset for 5 epochs above heaviestHeight (most of which will likely not exist),
|
||||
// as well as for 5 below.
|
||||
// This is slow, but we expect to almost-never be here (only if miners are equivocating, which carries a hefty penalty).
|
||||
for i := heaviestHeight + 5; i > heaviestHeight-5; i-- {
|
||||
possibleHeaviestTs, possibleHeaviestWeight, err := cs.FormHeaviestTipSetForHeight(ctx, i)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to produce head at height %d: %w", i, err)
|
||||
}
|
||||
|
||||
if possibleHeaviestWeight.GreaterThan(newHeaviestWeight) {
|
||||
newHeaviestWeight = possibleHeaviestWeight
|
||||
newHeaviest = possibleHeaviestTs
|
||||
}
|
||||
}
|
||||
|
||||
// if we've found something, we know it's the heaviest equivocation-free head, take it IMMEDIATELY
|
||||
if newHeaviest != nil {
|
||||
errTake := cs.takeHeaviestTipSet(ctx, newHeaviest)
|
||||
if errTake != nil {
|
||||
return xerrors.Errorf("failed to take newHeaviest tipset as head: %w", errTake)
|
||||
}
|
||||
} else {
|
||||
// if we haven't found something, just stay with our equivocation-y head
|
||||
newHeaviest = cs.heaviest
|
||||
}
|
||||
}
|
||||
|
||||
// if the new height we were notified about isn't what we just refreshed at, see if we have a heavier tipset there
|
||||
if newTsHeight != newHeaviest.Height() {
|
||||
bestTs, bestTsWeight, err := cs.FormHeaviestTipSetForHeight(ctx, newTsHeight)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to form new heaviest tipset at height %d: %w", newTsHeight, err)
|
||||
}
|
||||
|
||||
heavier := bestTsWeight.GreaterThan(newHeaviestWeight)
|
||||
if bestTsWeight.Equals(newHeaviestWeight) {
|
||||
heavier = breakWeightTie(bestTs, newHeaviest)
|
||||
}
|
||||
|
||||
if heavier {
|
||||
newHeaviest = bestTs
|
||||
}
|
||||
}
|
||||
|
||||
// Everything's the same as before, exit early
|
||||
if newHeaviest.Equals(cs.heaviest) {
|
||||
return nil
|
||||
}
|
||||
|
||||
w, err := cs.weight(ctx, cs.StateBlockstore(), ts)
|
||||
// At this point, it MUST be true that newHeaviest is heavier than cs.heaviest -- update if fork allows
|
||||
exceeds, err := cs.exceedsForkLength(ctx, cs.heaviest, newHeaviest)
|
||||
if err != nil {
|
||||
return err
|
||||
return xerrors.Errorf("failed to check fork length: %w", err)
|
||||
}
|
||||
heaviestW, err := cs.weight(ctx, cs.StateBlockstore(), cs.heaviest)
|
||||
|
||||
if exceeds {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = cs.takeHeaviestTipSet(ctx, newHeaviest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
heavier := w.GreaterThan(heaviestW)
|
||||
if w.Equals(heaviestW) && !ts.Equals(cs.heaviest) {
|
||||
log.Errorw("weight draw", "currTs", cs.heaviest, "ts", ts)
|
||||
heavier = breakWeightTie(ts, cs.heaviest)
|
||||
}
|
||||
|
||||
if heavier {
|
||||
// TODO: don't do this for initial sync. Now that we don't have a
|
||||
// difference between 'bootstrap sync' and 'caught up' sync, we need
|
||||
// some other heuristic.
|
||||
|
||||
exceeds, err := cs.exceedsForkLength(ctx, cs.heaviest, ts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exceeds {
|
||||
return nil
|
||||
}
|
||||
|
||||
return cs.takeHeaviestTipSet(ctx, ts)
|
||||
return xerrors.Errorf("failed to take heaviest tipset: %w", err)
|
||||
}
|
||||
|
||||
return nil
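When equivocation shrinks the weight of the current head, the code above probes a window of epochs around the old head height and keeps the heaviest tipset it can still form. The standalone sketch below shows only that selection loop with simplified types; the real code compares types.BigInt weights and additionally applies breakWeightTie on exact ties.

package main

import "fmt"

// candidate is an illustrative stand-in for a (tipset, weight) pair.
type candidate struct {
	key    string
	weight int64
}

// heaviestInWindow mirrors the recovery loop above: probe a small window of
// epochs around the current head height and keep the heaviest formable tipset.
// form returns nil when no tipset can be formed at that height.
func heaviestInWindow(form func(epoch int64) *candidate, center, radius int64) *candidate {
	var best *candidate
	for e := center + radius; e > center-radius; e-- {
		c := form(e)
		if c == nil {
			continue
		}
		if best == nil || c.weight > best.weight {
			best = c
		}
	}
	return best
}

func main() {
	weights := map[int64]int64{8: 90, 9: 120, 10: 100}
	form := func(e int64) *candidate {
		w, ok := weights[e]
		if !ok {
			return nil
		}
		return &candidate{key: fmt.Sprintf("ts@%d", e), weight: w}
	}
	fmt.Println(*heaviestInWindow(form, 10, 5)) // picks ts@9, the heaviest in the window
}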
|
||||
@ -655,6 +689,16 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet)
|
||||
return err
|
||||
}
|
||||
|
||||
// write the tipsetkey block to the blockstore for EthAPI queries
|
||||
tsBlk, err := ts.Key().ToStorageBlock()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get tipset key block: %w", err)
|
||||
}
|
||||
|
||||
if err = cs.chainLocalBlockstore.Put(ctx, tsBlk); err != nil {
|
||||
return xerrors.Errorf("failed to put tipset key block: %w", err)
|
||||
}
|
||||
|
||||
if prevHeaviest != nil { // buf
|
||||
if len(cs.reorgCh) > 0 {
|
||||
log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh))
|
||||
@ -904,6 +948,14 @@ func ReorgOps(ctx context.Context, lts func(ctx context.Context, _ types.TipSetK
|
||||
|
||||
var leftChain, rightChain []*types.TipSet
|
||||
for !left.Equals(right) {
|
||||
// this can take a long time and lot of memory if the tipsets are far apart
|
||||
// since it can be reached through remote calls, we need to
|
||||
// cancel early when possible to prevent resource exhaustion.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, nil, ctx.Err()
|
||||
default:
|
||||
}
|
||||
if left.Height() > right.Height() {
|
||||
leftChain = append(leftChain, left)
|
||||
par, err := lts(ctx, left.Parents())
|
||||
@ -960,7 +1012,7 @@ func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHead
|
||||
// This means that we ideally want to keep only most recent 900 epochs in here
|
||||
// Golang's map iteration starts at a random point in a map.
|
||||
// With 5 tries per epoch, and 900 entries to keep, on average we will have
|
||||
// ~136 garbage entires in the `cs.tipsets` map. (solve for 1-(1-x/(900+x))^5 == 0.5)
|
||||
// ~136 garbage entries in the `cs.tipsets` map. (solve for 1-(1-x/(900+x))^5 == 0.5)
|
||||
// Seems good enough to me
|
||||
|
||||
for height := range cs.tipsets {
|
||||
@ -975,6 +1027,7 @@ func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHead
|
||||
return nil
|
||||
}
|
||||
|
||||
// PersistTipsets writes the provided blocks and the TipSetKey objects to the blockstore
|
||||
func (cs *ChainStore) PersistTipsets(ctx context.Context, tipsets []*types.TipSet) error {
|
||||
toPersist := make([]*types.BlockHeader, 0, len(tipsets)*int(build.BlocksPerEpoch))
|
||||
tsBlks := make([]block.Block, 0, len(tipsets))
|
||||
@ -1027,44 +1080,72 @@ func (cs *ChainStore) persistBlockHeaders(ctx context.Context, b ...*types.Block
|
||||
return err
|
||||
}
|
||||
|
||||
func (cs *ChainStore) expandTipset(ctx context.Context, b *types.BlockHeader) (*types.TipSet, error) {
|
||||
// Hold lock for the whole function for now, if it becomes a problem we can
|
||||
// fix pretty easily
|
||||
// FormHeaviestTipSetForHeight looks up all valid blocks at a given height, and returns the heaviest tipset that can be made at that height
|
||||
// It does not consider ANY blocks from miners that have "equivocated" (produced 2 blocks at the same height)
|
||||
func (cs *ChainStore) FormHeaviestTipSetForHeight(ctx context.Context, height abi.ChainEpoch) (*types.TipSet, types.BigInt, error) {
|
||||
cs.tstLk.Lock()
|
||||
defer cs.tstLk.Unlock()
|
||||
|
||||
all := []*types.BlockHeader{b}
|
||||
|
||||
tsets, ok := cs.tipsets[b.Height]
|
||||
blockCids, ok := cs.tipsets[height]
|
||||
if !ok {
|
||||
return types.NewTipSet(all)
|
||||
return nil, types.NewInt(0), nil
|
||||
}
|
||||
|
||||
inclMiners := map[address.Address]cid.Cid{b.Miner: b.Cid()}
|
||||
for _, bhc := range tsets {
|
||||
if bhc == b.Cid() {
|
||||
continue
|
||||
}
|
||||
// First, identify "bad" miners for the height
|
||||
|
||||
seenMiners := map[address.Address]struct{}{}
|
||||
badMiners := map[address.Address]struct{}{}
|
||||
blocks := make([]*types.BlockHeader, 0, len(blockCids))
|
||||
for _, bhc := range blockCids {
|
||||
h, err := cs.GetBlock(ctx, bhc)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
|
||||
return nil, types.NewInt(0), xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err)
|
||||
}
|
||||
|
||||
if cid, found := inclMiners[h.Miner]; found {
|
||||
log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", h.Miner, h.Height, h.Cid(), cid)
|
||||
if _, seen := seenMiners[h.Miner]; seen {
|
||||
badMiners[h.Miner] = struct{}{}
|
||||
continue
|
||||
}
|
||||
seenMiners[h.Miner] = struct{}{}
|
||||
blocks = append(blocks, h)
|
||||
}
|
||||
|
||||
if types.CidArrsEqual(h.Parents, b.Parents) {
|
||||
all = append(all, h)
|
||||
inclMiners[h.Miner] = bhc
|
||||
// Next, group by parent tipset
|
||||
|
||||
formableTipsets := make(map[types.TipSetKey][]*types.BlockHeader, 0)
|
||||
for _, h := range blocks {
|
||||
if _, bad := badMiners[h.Miner]; bad {
|
||||
continue
|
||||
}
|
||||
ptsk := types.NewTipSetKey(h.Parents...)
|
||||
formableTipsets[ptsk] = append(formableTipsets[ptsk], h)
|
||||
}
|
||||
|
||||
maxWeight := types.NewInt(0)
|
||||
var maxTs *types.TipSet
|
||||
for _, headers := range formableTipsets {
|
||||
ts, err := types.NewTipSet(headers)
|
||||
if err != nil {
|
||||
return nil, types.NewInt(0), xerrors.Errorf("unexpected error forming tipset: %w", err)
|
||||
}
|
||||
|
||||
weight, err := cs.Weight(ctx, ts)
|
||||
if err != nil {
|
||||
return nil, types.NewInt(0), xerrors.Errorf("failed to calculate weight: %w", err)
|
||||
}
|
||||
|
||||
heavier := weight.GreaterThan(maxWeight)
|
||||
if weight.Equals(maxWeight) {
|
||||
heavier = breakWeightTie(ts, maxTs)
|
||||
}
|
||||
|
||||
if heavier {
|
||||
maxWeight = weight
|
||||
maxTs = ts
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: other validation...?
|
||||
|
||||
return types.NewTipSet(all)
|
||||
return maxTs, maxWeight, nil
|
||||
}
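FormHeaviestTipSetForHeight first drops every miner that produced more than one block at the height, then groups the surviving blocks by parent tipset and weighs each group. The sketch below shows just the filtering and grouping passes with simplified types; the real function goes on to call cs.Weight on each candidate tipset and returns the heaviest.

package main

import "fmt"

// hdr is an illustrative stand-in for types.BlockHeader.
type hdr struct {
	miner   string
	parents string // parent tipset key, flattened to a string for the sketch
}

// groupNonEquivocating mirrors the two passes above: any miner with more than
// one block at the height is excluded entirely, and the surviving blocks are
// grouped by parent tipset so each group is a formable tipset.
func groupNonEquivocating(blocks []hdr) map[string][]hdr {
	seen := map[string]int{}
	for _, b := range blocks {
		seen[b.miner]++
	}
	groups := map[string][]hdr{}
	for _, b := range blocks {
		if seen[b.miner] > 1 { // equivocating miner: drop all of its blocks
			continue
		}
		groups[b.parents] = append(groups[b.parents], b)
	}
	return groups
}

func main() {
	blocks := []hdr{
		{miner: "t01000", parents: "P1"},
		{miner: "t01001", parents: "P1"},
		{miner: "t01001", parents: "P1"}, // equivocation: t01001 mined twice
		{miner: "t01002", parents: "P2"},
	}
	fmt.Println(groupNonEquivocating(blocks)) // only t01000 (P1 group) and t01002 (P2 group) survive
}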
|
||||
|
||||
func (cs *ChainStore) GetGenesis(ctx context.Context) (*types.BlockHeader, error) {
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
"github.com/ipfs/go-datastore"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
|
||||
@ -238,3 +239,171 @@ func TestChainExportImportFull(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEquivocations(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
cg, err := gen.NewGenerator()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var last *types.TipSet
|
||||
for i := 0; i < 10; i++ {
|
||||
ts, err := cg.NextTipSet()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
last = ts.TipSet.TipSet()
|
||||
}
|
||||
|
||||
mTs, err := cg.NextTipSetFromMiners(last, []address.Address{last.Blocks()[0].Miner}, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(mTs.TipSet.TipSet().Cids()))
|
||||
last = mTs.TipSet.TipSet()
|
||||
|
||||
require.NotEmpty(t, last.Blocks())
|
||||
blk1 := *last.Blocks()[0]
|
||||
|
||||
// quick check: asking to form tipset at latest height just returns head
|
||||
bestHead, bestHeadWeight, err := cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, last.Key(), bestHead.Key())
|
||||
require.Contains(t, last.Cids(), blk1.Cid())
|
||||
expectedWeight, err := cg.ChainStore().Weight(ctx, bestHead)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedWeight, bestHeadWeight)
|
||||
|
||||
// add another block by a different miner -- it should get included in the best tipset
|
||||
blk2 := blk1
|
||||
blk1Miner, err := address.IDFromAddress(blk2.Miner)
|
||||
require.NoError(t, err)
|
||||
blk2.Miner, err = address.NewIDAddress(blk1Miner + 50)
|
||||
require.NoError(t, err)
|
||||
addBlockToTracker(t, cg.ChainStore(), &blk2)
|
||||
|
||||
bestHead, bestHeadWeight, err = cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
|
||||
require.NoError(t, err)
|
||||
for _, blkCid := range last.Cids() {
|
||||
require.Contains(t, bestHead.Cids(), blkCid)
|
||||
}
|
||||
require.Contains(t, bestHead.Cids(), blk2.Cid())
|
||||
expectedWeight, err = cg.ChainStore().Weight(ctx, bestHead)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedWeight, bestHeadWeight)
|
||||
|
||||
// add another block by a different miner, but on a different tipset -- it should NOT get included
|
||||
blk3 := blk1
|
||||
blk3.Miner, err = address.NewIDAddress(blk1Miner + 100)
|
||||
require.NoError(t, err)
|
||||
blk1Parent, err := cg.ChainStore().GetBlock(ctx, blk3.Parents[0])
|
||||
require.NoError(t, err)
|
||||
blk3.Parents = blk1Parent.Parents
|
||||
addBlockToTracker(t, cg.ChainStore(), &blk3)
|
||||
|
||||
bestHead, bestHeadWeight, err = cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
|
||||
require.NoError(t, err)
|
||||
for _, blkCid := range last.Cids() {
|
||||
require.Contains(t, bestHead.Cids(), blkCid)
|
||||
}
|
||||
require.Contains(t, bestHead.Cids(), blk2.Cid())
|
||||
require.NotContains(t, bestHead.Cids(), blk3.Cid())
|
||||
expectedWeight, err = cg.ChainStore().Weight(ctx, bestHead)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedWeight, bestHeadWeight)
|
||||
|
||||
// add another block by the same miner as blk1 -- it should NOT get included, and blk1 should be excluded too
|
||||
blk4 := blk1
|
||||
blk4.Timestamp = blk1.Timestamp + 1
|
||||
addBlockToTracker(t, cg.ChainStore(), &blk4)
|
||||
|
||||
bestHead, bestHeadWeight, err = cg.ChainStore().FormHeaviestTipSetForHeight(ctx, last.Height())
|
||||
require.NoError(t, err)
|
||||
for _, blkCid := range last.Cids() {
|
||||
if blkCid != blk1.Cid() {
|
||||
require.Contains(t, bestHead.Cids(), blkCid)
|
||||
}
|
||||
}
|
||||
require.NotContains(t, bestHead.Cids(), blk4.Cid())
|
||||
require.NotContains(t, bestHead.Cids(), blk1.Cid())
|
||||
expectedWeight, err = cg.ChainStore().Weight(ctx, bestHead)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedWeight, bestHeadWeight)
|
||||
|
||||
// check that after all of that, the chainstore's head has NOT changed
|
||||
require.Equal(t, last.Key(), cg.ChainStore().GetHeaviestTipSet().Key())
|
||||
|
||||
// NOW, after all that, notify the chainstore to refresh its head
|
||||
require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blk1.Height+1))
|
||||
|
||||
originalHead := *last
|
||||
newHead := cg.ChainStore().GetHeaviestTipSet()
|
||||
// the newHead should be at the same height as the originalHead
|
||||
require.Equal(t, originalHead.Height(), newHead.Height())
|
||||
// the newHead should NOT be the same as the originalHead
|
||||
require.NotEqual(t, originalHead.Key(), newHead.Key())
|
||||
// specifically, it should not contain any blocks by blk1Miner
|
||||
for _, b := range newHead.Blocks() {
|
||||
require.NotEqual(t, blk1.Miner, b.Miner)
|
||||
}
|
||||
|
||||
// now have blk2's Miner equivocate too! this causes us to switch to a tipset with a different parent!
|
||||
blk5 := blk2
|
||||
blk5.Timestamp = blk5.Timestamp + 1
|
||||
addBlockToTracker(t, cg.ChainStore(), &blk5)
|
||||
|
||||
// notify the chainstore to refresh its head
|
||||
require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blk1.Height+1))
|
||||
newHead = cg.ChainStore().GetHeaviestTipSet()
|
||||
// the newHead should still be at the same height as the originalHead
|
||||
require.Equal(t, originalHead.Height(), newHead.Height())
|
||||
// BUT it should no longer have the same parents -- only blk3's miner is good, and they mined on a different tipset
|
||||
require.Equal(t, 1, len(newHead.Blocks()))
|
||||
require.Equal(t, blk3.Cid(), newHead.Cids()[0])
|
||||
require.NotEqual(t, originalHead.Parents(), newHead.Parents())
|
||||
|
||||
// now have blk3's Miner equivocate too! this causes us to switch to a previous epoch entirely :(
|
||||
blk6 := blk3
|
||||
blk6.Timestamp = blk6.Timestamp + 1
|
||||
addBlockToTracker(t, cg.ChainStore(), &blk6)
|
||||
|
||||
// trying to form a tipset at our previous height leads to emptiness
|
||||
tryTs, tryTsWeight, err := cg.ChainStore().FormHeaviestTipSetForHeight(ctx, blk1.Height)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, tryTs)
|
||||
require.True(t, tryTsWeight.IsZero())
|
||||
|
||||
// notify the chainstore to refresh its head
|
||||
require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blk1.Height+1))
|
||||
newHead = cg.ChainStore().GetHeaviestTipSet()
|
||||
// the newHead should now be one epoch behind originalHead
|
||||
require.Greater(t, originalHead.Height(), newHead.Height())
|
||||
|
||||
// next, we create a new tipset with only one block after many null rounds
|
||||
headAfterNulls, err := cg.NextTipSetFromMiners(newHead, []address.Address{newHead.Blocks()[0].Miner}, 15)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(headAfterNulls.TipSet.Blocks))
|
||||
|
||||
// now, we disqualify the block in this tipset because of equivocation
|
||||
blkAfterNulls := headAfterNulls.TipSet.TipSet().Blocks()[0]
|
||||
equivocatedBlkAfterNulls := *blkAfterNulls
|
||||
equivocatedBlkAfterNulls.Timestamp = blkAfterNulls.Timestamp + 1
|
||||
addBlockToTracker(t, cg.ChainStore(), &equivocatedBlkAfterNulls)
|
||||
|
||||
// try to form a tipset at this height -- it should be empty
|
||||
tryTs2, tryTsWeight2, err := cg.ChainStore().FormHeaviestTipSetForHeight(ctx, blkAfterNulls.Height)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, tryTs2)
|
||||
require.True(t, tryTsWeight2.IsZero())
|
||||
|
||||
// now we "notify" at this height -- it should lead to no head change because there's no formable head in near epochs
|
||||
require.NoError(t, cg.ChainStore().RefreshHeaviestTipSet(ctx, blkAfterNulls.Height))
|
||||
require.True(t, headAfterNulls.TipSet.TipSet().Equals(cg.ChainStore().GetHeaviestTipSet()))
|
||||
}
|
||||
|
||||
func addBlockToTracker(t *testing.T, cs *store.ChainStore, blk *types.BlockHeader) {
|
||||
blk2Ts, err := types.NewTipSet([]*types.BlockHeader{blk})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, cs.PersistTipsets(context.TODO(), []*types.TipSet{blk2Ts}))
|
||||
require.NoError(t, cs.AddToTipSetTracker(context.TODO(), blk))
|
||||
}
|
||||
|
@ -350,6 +350,7 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
|
||||
)
|
||||
recordFailure(ctx, metrics.MessageValidationFailure, "add")
|
||||
switch {
|
||||
|
||||
case xerrors.Is(err, messagepool.ErrSoftValidationFailure):
|
||||
fallthrough
|
||||
case xerrors.Is(err, messagepool.ErrRBFTooLowPremium):
|
||||
@ -362,8 +363,17 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs
|
||||
fallthrough
|
||||
case xerrors.Is(err, messagepool.ErrNonceTooLow):
|
||||
fallthrough
|
||||
case xerrors.Is(err, messagepool.ErrNotEnoughFunds):
|
||||
fallthrough
|
||||
case xerrors.Is(err, messagepool.ErrExistingNonce):
|
||||
return pubsub.ValidationIgnore
|
||||
|
||||
case xerrors.Is(err, messagepool.ErrMessageTooBig):
|
||||
fallthrough
|
||||
case xerrors.Is(err, messagepool.ErrMessageValueTooHigh):
|
||||
fallthrough
|
||||
case xerrors.Is(err, messagepool.ErrInvalidToAddr):
|
||||
fallthrough
|
||||
default:
|
||||
return pubsub.ValidationReject
|
||||
}
|
||||
@ -519,9 +529,8 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
|
||||
|
||||
msgCid := idxrMsg.Cid
|
||||
|
||||
var msgInfo *peerMsgInfo
|
||||
msgInfo, ok := v.peerCache.Get(minerAddr)
|
||||
if !ok {
|
||||
msgInfo, cached := v.peerCache.Get(minerAddr)
|
||||
if !cached {
|
||||
msgInfo = &peerMsgInfo{}
|
||||
}
|
||||
|
||||
@ -529,17 +538,17 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
|
||||
msgInfo.mutex.Lock()
|
||||
defer msgInfo.mutex.Unlock()
|
||||
|
||||
if ok {
|
||||
var seqno uint64
|
||||
if cached {
|
||||
// Reject replayed messages.
|
||||
seqno := binary.BigEndian.Uint64(msg.Message.GetSeqno())
|
||||
seqno = binary.BigEndian.Uint64(msg.Message.GetSeqno())
|
||||
if seqno <= msgInfo.lastSeqno {
|
||||
log.Debugf("ignoring replayed indexer message")
|
||||
return pubsub.ValidationIgnore
|
||||
}
|
||||
msgInfo.lastSeqno = seqno
|
||||
}
|
||||
|
||||
if !ok || originPeer != msgInfo.peerID {
|
||||
if !cached || originPeer != msgInfo.peerID {
|
||||
// Check that the miner ID maps to the peer that sent the message.
|
||||
err = v.authenticateMessage(ctx, minerAddr, originPeer)
|
||||
if err != nil {
|
||||
@ -548,7 +557,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
|
||||
return pubsub.ValidationReject
|
||||
}
|
||||
msgInfo.peerID = originPeer
|
||||
if !ok {
|
||||
if !cached {
|
||||
// Add msgInfo to cache only after being authenticated. If two
|
||||
// messages from the same peer are handled concurrently, there is a
|
||||
// small chance that one msgInfo could replace the other here when
|
||||
@ -557,6 +566,9 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
|
||||
}
|
||||
}
|
||||
|
||||
// Update message info cache with the latest message's sequence number.
|
||||
msgInfo.lastSeqno = seqno
|
||||
|
||||
// See if message needs to be ignored due to rate limiting.
|
||||
if v.rateLimitPeer(msgInfo, msgCid) {
|
||||
return pubsub.ValidationIgnore
|
||||
|
@ -12,10 +12,12 @@ import (
|
||||
"github.com/ipni/go-libipni/announce/message"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/api/mocks"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
@ -134,3 +136,123 @@ func TestIndexerMessageValidator_Validate(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIdxValidator(t *testing.T) {
|
||||
validCid, err := cid.Decode("QmbpDgg5kRLDgMxS8vPKNFXEcA6D5MC4CkuUdSWDVtHPGK")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
addr, err := address.NewFromString("f01024")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
buf1, err := addr.MarshalBinary()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
selfPID := "12D3KooWQiCbqEStCkdqUvr69gQsrp9urYJZUCkzsQXia7mbqbFW"
|
||||
senderPID := "12D3KooWE8yt84RVwW3sFcd6WMjbUdWrZer2YtT4dmtj3dHdahSZ"
|
||||
extraData := buf1
|
||||
|
||||
mc := gomock.NewController(t)
|
||||
node := mocks.NewMockFullNode(mc)
|
||||
node.EXPECT().ChainHead(gomock.Any()).Return(nil, nil).AnyTimes()
|
||||
|
||||
subject := NewIndexerMessageValidator(peer.ID(selfPID), node, node)
|
||||
message := message.Message{
|
||||
Cid: validCid,
|
||||
Addrs: nil,
|
||||
ExtraData: extraData,
|
||||
}
|
||||
buf := bytes.NewBuffer(nil)
|
||||
if err := message.MarshalCBOR(buf); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
topic := "topic"
|
||||
|
||||
privk, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
id, err := peer.IDFromPublicKey(privk.GetPublic())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
node.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(api.MinerInfo{PeerId: &id}, nil).AnyTimes()
|
||||
|
||||
pbm := &pb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
From: []byte(id),
|
||||
Seqno: []byte{1, 1, 1, 1, 2, 2, 2, 2},
|
||||
}
|
||||
validate := subject.Validate(context.Background(), peer.ID(senderPID), &pubsub.Message{
|
||||
Message: pbm,
|
||||
ReceivedFrom: peer.ID("f01024"), // peer.ID(senderPID),
|
||||
ValidatorData: nil,
|
||||
})
|
||||
if validate != pubsub.ValidationAccept {
|
||||
t.Error("Expected to receive ValidationAccept")
|
||||
}
|
||||
msgInfo, cached := subject.peerCache.Get(addr)
|
||||
if !cached {
|
||||
t.Fatal("Message info should be in cache")
|
||||
}
|
||||
seqno := msgInfo.lastSeqno
|
||||
msgInfo.rateLimit = nil // prevent interference from rate limiting
|
||||
|
||||
t.Log("Sending DoS msg")
|
||||
privk, _, err = crypto.GenerateKeyPair(crypto.RSA, 2048)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
id2, err := peer.IDFromPublicKey(privk.GetPublic())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
pbm = &pb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
From: []byte(id2),
|
||||
Seqno: []byte{255, 255, 255, 255, 255, 255, 255, 255},
|
||||
}
|
||||
validate = subject.Validate(context.Background(), peer.ID(senderPID), &pubsub.Message{
|
||||
Message: pbm,
|
||||
ReceivedFrom: peer.ID(senderPID),
|
||||
ValidatorData: nil,
|
||||
})
|
||||
if validate != pubsub.ValidationReject {
|
||||
t.Error("Expected to get ValidationReject")
|
||||
}
|
||||
msgInfo, cached = subject.peerCache.Get(addr)
|
||||
if !cached {
|
||||
t.Fatal("Message info should be in cache")
|
||||
}
|
||||
msgInfo.rateLimit = nil // prevent interference from rate limiting
|
||||
|
||||
// Check if DoS is possible.
|
||||
if msgInfo.lastSeqno != seqno {
|
||||
t.Fatal("Sequence number should not have been updated")
|
||||
}
|
||||
|
||||
t.Log("Sending another valid message from miner...")
|
||||
pbm = &pb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
From: []byte(id),
|
||||
Seqno: []byte{1, 1, 1, 1, 2, 2, 2, 3},
|
||||
}
|
||||
validate = subject.Validate(context.Background(), peer.ID(senderPID), &pubsub.Message{
|
||||
Message: pbm,
|
||||
ReceivedFrom: peer.ID("f01024"), // peer.ID(senderPID),
|
||||
ValidatorData: nil,
|
||||
})
|
||||
if validate != pubsub.ValidationAccept {
|
||||
t.Fatal("Did not receive ValidationAccept")
|
||||
}
|
||||
}
|
||||
|
chain/sub/ratelimit/queue_test.go (new file, 61 lines)
@ -0,0 +1,61 @@
|
||||
package ratelimit
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestQueue(t *testing.T) {
|
||||
const size = 3
|
||||
q := &queue{buf: make([]int64, size)}
|
||||
|
||||
if q.len() != 0 {
|
||||
t.Fatalf("q.len() = %d, expect 0", q.len())
|
||||
}
|
||||
|
||||
if q.cap() != size {
|
||||
t.Fatalf("q.cap() = %d, expect %d", q.cap(), size)
|
||||
}
|
||||
|
||||
for i := int64(0); i < int64(size); i++ {
|
||||
err := q.push(i)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot push element %d", i)
|
||||
}
|
||||
}
|
||||
|
||||
if q.len() != size {
|
||||
t.Fatalf("q.len() = %d, expect %d", q.len(), size)
|
||||
}
|
||||
|
||||
err := q.push(int64(size))
|
||||
if err != ErrRateLimitExceeded {
|
||||
t.Fatalf("pushing element beyond capacity should have failed with err: %s, got %s", ErrRateLimitExceeded, err)
|
||||
}
|
||||
|
||||
if q.front() != 0 {
|
||||
t.Fatalf("q.front() = %d, expect 0", q.front())
|
||||
}
|
||||
|
||||
if q.back() != int64(size-1) {
|
||||
t.Fatalf("q.back() = %d, expect %d", q.back(), size-1)
|
||||
}
|
||||
|
||||
popVal := q.pop()
|
||||
if popVal != 0 {
|
||||
t.Fatalf("q.pop() = %d, expect 0", popVal)
|
||||
}
|
||||
|
||||
if q.len() != size-1 {
|
||||
t.Fatalf("q.len() = %d, expect %d", q.len(), size-1)
|
||||
}
|
||||
|
||||
// Testing truncation.
|
||||
threshold := int64(1)
|
||||
q.truncate(threshold)
|
||||
if q.len() != 1 {
|
||||
t.Fatalf("q.len() after truncate = %d, expect 1", q.len())
|
||||
}
|
||||
if q.front() != 2 {
|
||||
t.Fatalf("q.front() after truncate = %d, expect 2", q.front())
|
||||
}
|
||||
}
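TestQueue pins down the behaviour of the rate limiter's fixed-capacity FIFO: pushes beyond capacity fail with ErrRateLimitExceeded, pop removes from the front, and truncate drops leading entries up to and including the threshold. The queue itself lives in chain/sub/ratelimit/queue.go and is not part of this diff; the following is only a sketch of an implementation that satisfies the assertions above, not the real code.

package ratelimit

import "errors"

var ErrRateLimitExceeded = errors.New("rate limit exceeded")

// queue is a sketch of the fixed-capacity FIFO exercised by TestQueue: it
// stores int64 values (timestamps in the real rate limiter), rejects pushes
// beyond capacity, and truncate drops all leading entries <= the threshold.
type queue struct {
	buf   []int64
	head  int // index of the oldest element
	count int // number of stored elements
}

func (q *queue) len() int { return q.count }
func (q *queue) cap() int { return len(q.buf) }

func (q *queue) front() int64 { return q.buf[q.head] }
func (q *queue) back() int64  { return q.buf[(q.head+q.count-1)%len(q.buf)] }

func (q *queue) push(v int64) error {
	if q.count == len(q.buf) {
		return ErrRateLimitExceeded
	}
	q.buf[(q.head+q.count)%len(q.buf)] = v
	q.count++
	return nil
}

func (q *queue) pop() int64 {
	v := q.buf[q.head]
	q.head = (q.head + 1) % len(q.buf)
	q.count--
	return v
}

// truncate removes entries from the front while they are <= threshold.
func (q *queue) truncate(threshold int64) {
	for q.count > 0 && q.front() <= threshold {
		q.pop()
	}
}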
|
@ -536,7 +536,7 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
|
||||
|
||||
// At this point we have accepted and synced to the new `maybeHead`
|
||||
// (`StageSyncComplete`).
|
||||
if err := syncer.store.PutTipSet(ctx, maybeHead); err != nil {
|
||||
if err := syncer.store.RefreshHeaviestTipSet(ctx, maybeHead.Height()); err != nil {
|
||||
span.AddAttributes(trace.StringAttribute("put_error", err.Error()))
|
||||
span.SetStatus(trace.Status{
|
||||
Code: 13,
|
||||
|
@ -92,6 +92,7 @@ type syncManager struct {
|
||||
var _ SyncManager = (*syncManager)(nil)
|
||||
|
||||
type peerHead struct {
|
||||
// Note: this doesn't _necessarily_ mean that p's head is ts, just that ts is a tipset that p sent to us
|
||||
p peer.ID
|
||||
ts *types.TipSet
|
||||
}
|
||||
|
Some files were not shown because too many files have changed in this diff.