Compare commits


4 Commits

Ian Norden (0c3e6feeaa, 2023-12-01 10:03:15 -06:00)
  Merge pull request #4 from vulcanize/ian_update
  Add indexes to `event.emitter_addr` and `event_entry.key`

i-norden (f1017b18d8, 2023-11-30 13:03:36 -06:00)
  add index to event.emitter_addr

i-norden (ab151d6708, 2023-11-30 13:00:34 -06:00)
  add index to event_entry.key

Ian Norden (658884f7e7, 2023-11-20 10:48:16 -06:00)
  fix: api: exclude reverted events in eth_getLogs results (#11318)
  * exclude reverted events from results returned by eth_getLogs
  * unit test
  * update CHANGELOG.md
437 changed files with 6337 additions and 27204 deletions
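The two index commits above touch the node's event store. Below is a rough sketch of the equivalent schema change, assuming the SQLite-backed events database (with `event` and `event_entry` tables) that backs the Eth RPC event filters; the index names, database path, and driver are illustrative and not taken from the diff itself:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3" // SQLite driver, assumed here for illustration
)

func main() {
	// Illustrative path; the node keeps its event index in its own repo directory.
	db, err := sql.Open("sqlite3", "events.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	stmts := []string{
		// Speeds up eth_getLogs-style filters that match on the emitting actor's address.
		`CREATE INDEX IF NOT EXISTS event_emitter_addr ON event (emitter_addr)`,
		// Speeds up filters that match on event entry keys (topics).
		`CREATE INDEX IF NOT EXISTS event_entry_key ON event_entry (key)`,
	}
	for _, stmt := range stmts {
		if _, err := db.Exec(stmt); err != nil {
			log.Fatalf("creating index: %v", err)
		}
	}
}
```

Without such indexes, filters that select on `emitter_addr` or on entry keys have to scan the whole table.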

View File

@@ -1,18 +1,18 @@
version: 2.1 version: 2.1
orbs: orbs:
aws-cli: circleci/aws-cli@4.1.1 aws-cli: circleci/aws-cli@1.3.2
docker: circleci/docker@2.3.0 docker: circleci/docker@2.1.4
executors: executors:
golang: golang:
docker: docker:
# Must match GO_VERSION_MIN in project root # Must match GO_VERSION_MIN in project root
- image: cimg/go:1.21.7 - image: cimg/go:1.20.7
resource_class: medium+ resource_class: medium+
golang-2xl: golang-2xl:
docker: docker:
# Must match GO_VERSION_MIN in project root # Must match GO_VERSION_MIN in project root
- image: cimg/go:1.21.7 - image: cimg/go:1.20.7
resource_class: 2xlarge resource_class: 2xlarge
ubuntu: ubuntu:
docker: docker:
@@ -70,6 +70,8 @@ commands:
name: Restore parameters cache name: Restore parameters cache
keys: keys:
- 'v26-2k-lotus-params' - 'v26-2k-lotus-params'
paths:
- /var/tmp/filecoin-proof-parameters/
- run: ./lotus fetch-params 2048 - run: ./lotus fetch-params 2048
- save_cache: - save_cache:
name: Save parameters cache name: Save parameters cache
@@ -94,7 +96,6 @@ commands:
git fetch --all git fetch --all
install-ubuntu-deps: install-ubuntu-deps:
steps: steps:
- run: sudo apt install curl ca-certificates gnupg
- run: sudo apt-get update - run: sudo apt-get update
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
check-go-version: check-go-version:
@@ -142,9 +143,9 @@ jobs:
Run tests with gotestsum. Run tests with gotestsum.
working_directory: ~/lotus working_directory: ~/lotus
parameters: &test-params parameters: &test-params
resource_class: executor:
type: string type: executor
default: medium+ default: golang
go-test-flags: go-test-flags:
type: string type: string
default: "-timeout 20m" default: "-timeout 20m"
@@ -163,14 +164,7 @@ jobs:
type: string type: string
default: unit default: unit
description: Test suite name to report to CircleCI. description: Test suite name to report to CircleCI.
docker: executor: << parameters.executor >>
- image: cimg/go:1.21
environment:
LOTUS_HARMONYDB_HOSTS: yugabyte
- image: yugabytedb/yugabyte:2.18.0.0-b65
command: bin/yugabyted start --daemon=false
name: yugabyte
resource_class: << parameters.resource_class >>
steps: steps:
- install-ubuntu-deps - install-ubuntu-deps
- attach_workspace: - attach_workspace:
@@ -188,8 +182,6 @@ jobs:
command: | command: |
mkdir -p /tmp/test-reports/<< parameters.suite >> mkdir -p /tmp/test-reports/<< parameters.suite >>
mkdir -p /tmp/test-artifacts mkdir -p /tmp/test-artifacts
dockerize -wait tcp://yugabyte:5433 -timeout 3m
env
gotestsum \ gotestsum \
--format standard-verbose \ --format standard-verbose \
--junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \ --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
@@ -217,9 +209,7 @@ jobs:
Branch on github.com/filecoin-project/test-vectors to checkout and Branch on github.com/filecoin-project/test-vectors to checkout and
test with. If empty (the default) the commit defined by the git test with. If empty (the default) the commit defined by the git
submodule is used. submodule is used.
docker: executor: << parameters.executor >>
- image: cimg/go:1.21
resource_class: << parameters.resource_class >>
steps: steps:
- install-ubuntu-deps - install-ubuntu-deps
- attach_workspace: - attach_workspace:
@@ -406,14 +396,15 @@ jobs:
Run golangci-lint. Run golangci-lint.
working_directory: ~/lotus working_directory: ~/lotus
parameters: parameters:
executor:
type: executor
default: golang
args: args:
type: string type: string
default: '' default: ''
description: | description: |
Arguments to pass to golangci-lint Arguments to pass to golangci-lint
docker: executor: << parameters.executor >>
- image: cimg/go:1.21
resource_class: medium+
steps: steps:
- install-ubuntu-deps - install-ubuntu-deps
- attach_workspace: - attach_workspace:
@@ -584,7 +575,7 @@ workflows:
- build - build
suite: itest-deals_concurrent suite: itest-deals_concurrent
target: "./itests/deals_concurrent_test.go" target: "./itests/deals_concurrent_test.go"
resource_class: 2xlarge executor: golang-2xl
- test: - test:
name: test-itest-deals_invalid_utf8_label name: test-itest-deals_invalid_utf8_label
requires: requires:
@@ -663,18 +654,6 @@ workflows:
- build - build
suite: itest-decode_params suite: itest-decode_params
target: "./itests/decode_params_test.go" target: "./itests/decode_params_test.go"
- test:
name: test-itest-direct_data_onboard
requires:
- build
suite: itest-direct_data_onboard
target: "./itests/direct_data_onboard_test.go"
- test:
name: test-itest-direct_data_onboard_verified
requires:
- build
suite: itest-direct_data_onboard_verified
target: "./itests/direct_data_onboard_verified_test.go"
- test: - test:
name: test-itest-dup_mpool_messages name: test-itest-dup_mpool_messages
requires: requires:
@@ -789,18 +768,6 @@ workflows:
- build - build
suite: itest-get_messages_in_ts suite: itest-get_messages_in_ts
target: "./itests/get_messages_in_ts_test.go" target: "./itests/get_messages_in_ts_test.go"
- test:
name: test-itest-harmonydb
requires:
- build
suite: itest-harmonydb
target: "./itests/harmonydb_test.go"
- test:
name: test-itest-harmonytask
requires:
- build
suite: itest-harmonytask
target: "./itests/harmonytask_test.go"
- test: - test:
name: test-itest-lite_migration name: test-itest-lite_migration
requires: requires:
@@ -945,7 +912,6 @@ workflows:
- build - build
suite: itest-sector_pledge suite: itest-sector_pledge
target: "./itests/sector_pledge_test.go" target: "./itests/sector_pledge_test.go"
resource_class: 2xlarge
get-params: true get-params: true
- test: - test:
@@ -1010,14 +976,14 @@ workflows:
- build - build
suite: itest-wdpost_worker_config suite: itest-wdpost_worker_config
target: "./itests/wdpost_worker_config_test.go" target: "./itests/wdpost_worker_config_test.go"
resource_class: 2xlarge executor: golang-2xl
- test: - test:
name: test-itest-worker name: test-itest-worker
requires: requires:
- build - build
suite: itest-worker suite: itest-worker
target: "./itests/worker_test.go" target: "./itests/worker_test.go"
resource_class: 2xlarge executor: golang-2xl
- test: - test:
name: test-itest-worker_upgrade name: test-itest-worker_upgrade
requires: requires:
@@ -1030,28 +996,32 @@ workflows:
- build - build
suite: utest-unit-cli suite: utest-unit-cli
target: "./cli/... ./cmd/... ./api/..." target: "./cli/... ./cmd/... ./api/..."
resource_class: 2xlarge
get-params: true get-params: true
executor: golang-2xl
- test: - test:
name: test-unit-node name: test-unit-node
requires: requires:
- build - build
suite: utest-unit-node suite: utest-unit-node
target: "./node/..." target: "./node/..."
- test: - test:
name: test-unit-rest name: test-unit-rest
requires: requires:
- build - build
suite: utest-unit-rest suite: utest-unit-rest
target: "./blockstore/... ./build/... ./chain/... ./conformance/... ./gateway/... ./journal/... ./lib/... ./markets/... ./paychmgr/... ./tools/..." target: "./blockstore/... ./build/... ./chain/... ./conformance/... ./gateway/... ./journal/... ./lib/... ./markets/... ./paychmgr/... ./tools/..."
resource_class: 2xlarge
executor: golang-2xl
- test: - test:
name: test-unit-storage name: test-unit-storage
requires: requires:
- build - build
suite: utest-unit-storage suite: utest-unit-storage
target: "./storage/... ./extern/..." target: "./storage/... ./extern/..."
get-params: true
- test: - test:
go-test-flags: "-run=TestMulticoreSDR" go-test-flags: "-run=TestMulticoreSDR"
requires: requires:

View File

@@ -10,25 +10,11 @@ import (
"text/template" "text/template"
) )
var GoVersion = "" // from init below. Ex: 1.19.7
//go:generate go run ./gen.go .. //go:generate go run ./gen.go ..
//go:embed template.yml //go:embed template.yml
var templateFile embed.FS var templateFile embed.FS
func init() {
b, err := os.ReadFile("../go.mod")
if err != nil {
panic("cannot find go.mod in parent folder")
}
for _, line := range strings.Split(string(b), "\n") {
if strings.HasPrefix(line, "go ") {
GoVersion = line[3:]
}
}
}
type ( type (
dirs = []string dirs = []string
suite = string suite = string
@@ -125,7 +111,6 @@ func main() {
Networks []string Networks []string
ItestFiles []string ItestFiles []string
UnitSuites map[string]string UnitSuites map[string]string
GoVersion string
} }
in := data{ in := data{
Networks: []string{"mainnet", "butterflynet", "calibnet", "debug"}, Networks: []string{"mainnet", "butterflynet", "calibnet", "debug"},
@@ -140,7 +125,6 @@ func main() {
} }
return ret return ret
}(), }(),
GoVersion: GoVersion,
} }
out, err := os.Create("./config.yml") out, err := os.Create("./config.yml")

View File

@@ -1,18 +1,18 @@
version: 2.1 version: 2.1
orbs: orbs:
aws-cli: circleci/aws-cli@4.1.1 aws-cli: circleci/aws-cli@1.3.2
docker: circleci/docker@2.3.0 docker: circleci/docker@2.1.4
executors: executors:
golang: golang:
docker: docker:
# Must match GO_VERSION_MIN in project root # Must match GO_VERSION_MIN in project root
- image: cimg/go:1.21.7 - image: cimg/go:1.20.7
resource_class: medium+ resource_class: medium+
golang-2xl: golang-2xl:
docker: docker:
# Must match GO_VERSION_MIN in project root # Must match GO_VERSION_MIN in project root
- image: cimg/go:1.21.7 - image: cimg/go:1.20.7
resource_class: 2xlarge resource_class: 2xlarge
ubuntu: ubuntu:
docker: docker:
@@ -70,6 +70,8 @@ commands:
name: Restore parameters cache name: Restore parameters cache
keys: keys:
- 'v26-2k-lotus-params' - 'v26-2k-lotus-params'
paths:
- /var/tmp/filecoin-proof-parameters/
- run: ./lotus fetch-params 2048 - run: ./lotus fetch-params 2048
- save_cache: - save_cache:
name: Save parameters cache name: Save parameters cache
@@ -94,7 +96,6 @@ commands:
git fetch --all git fetch --all
install-ubuntu-deps: install-ubuntu-deps:
steps: steps:
- run: sudo apt install curl ca-certificates gnupg
- run: sudo apt-get update - run: sudo apt-get update
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
check-go-version: check-go-version:
@@ -142,9 +143,9 @@ jobs:
Run tests with gotestsum. Run tests with gotestsum.
working_directory: ~/lotus working_directory: ~/lotus
parameters: &test-params parameters: &test-params
resource_class: executor:
type: string type: executor
default: medium+ default: golang
go-test-flags: go-test-flags:
type: string type: string
default: "-timeout 20m" default: "-timeout 20m"
@@ -163,14 +164,7 @@ jobs:
type: string type: string
default: unit default: unit
description: Test suite name to report to CircleCI. description: Test suite name to report to CircleCI.
docker: executor: << parameters.executor >>
- image: cimg/go:[[ .GoVersion]]
environment:
LOTUS_HARMONYDB_HOSTS: yugabyte
- image: yugabytedb/yugabyte:2.18.0.0-b65
command: bin/yugabyted start --daemon=false
name: yugabyte
resource_class: << parameters.resource_class >>
steps: steps:
- install-ubuntu-deps - install-ubuntu-deps
- attach_workspace: - attach_workspace:
@@ -188,8 +182,6 @@ jobs:
command: | command: |
mkdir -p /tmp/test-reports/<< parameters.suite >> mkdir -p /tmp/test-reports/<< parameters.suite >>
mkdir -p /tmp/test-artifacts mkdir -p /tmp/test-artifacts
dockerize -wait tcp://yugabyte:5433 -timeout 3m
env
gotestsum \ gotestsum \
--format standard-verbose \ --format standard-verbose \
--junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \ --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
@@ -217,9 +209,7 @@ jobs:
Branch on github.com/filecoin-project/test-vectors to checkout and Branch on github.com/filecoin-project/test-vectors to checkout and
test with. If empty (the default) the commit defined by the git test with. If empty (the default) the commit defined by the git
submodule is used. submodule is used.
docker: executor: << parameters.executor >>
- image: cimg/go:[[ .GoVersion]]
resource_class: << parameters.resource_class >>
steps: steps:
- install-ubuntu-deps - install-ubuntu-deps
- attach_workspace: - attach_workspace:
@@ -406,14 +396,15 @@ jobs:
Run golangci-lint. Run golangci-lint.
working_directory: ~/lotus working_directory: ~/lotus
parameters: parameters:
executor:
type: executor
default: golang
args: args:
type: string type: string
default: '' default: ''
description: | description: |
Arguments to pass to golangci-lint Arguments to pass to golangci-lint
docker: executor: << parameters.executor >>
- image: cimg/go:[[ .GoVersion]]
resource_class: medium+
steps: steps:
- install-ubuntu-deps - install-ubuntu-deps
- attach_workspace: - attach_workspace:
@@ -551,8 +542,8 @@ workflows:
- build - build
suite: itest-[[ $name ]] suite: itest-[[ $name ]]
target: "./itests/[[ $file ]]" target: "./itests/[[ $file ]]"
[[- if or (eq $name "worker") (eq $name "deals_concurrent") (eq $name "wdpost_worker_config") (eq $name "sector_pledge")]] [[- if or (eq $name "worker") (eq $name "deals_concurrent") (eq $name "wdpost_worker_config")]]
resource_class: 2xlarge executor: golang-2xl
[[- end]] [[- end]]
[[- if or (eq $name "wdpost") (eq $name "sector_pledge")]] [[- if or (eq $name "wdpost") (eq $name "sector_pledge")]]
get-params: true get-params: true
@@ -566,16 +557,9 @@ workflows:
- build - build
suite: utest-[[ $suite ]] suite: utest-[[ $suite ]]
target: "[[ $pkgs ]]" target: "[[ $pkgs ]]"
[[- if eq $suite "unit-storage"]] [[if eq $suite "unit-cli"]]get-params: true[[end]]
get-params: true [[if eq $suite "unit-cli"]]executor: golang-2xl[[end]]
[[- end -]] [[- if eq $suite "unit-rest"]]executor: golang-2xl[[end]]
[[- if eq $suite "unit-cli"]]
resource_class: 2xlarge
get-params: true
[[- end -]]
[[- if eq $suite "unit-rest"]]
resource_class: 2xlarge
[[- end -]]
[[- end]] [[- end]]
- test: - test:
go-test-flags: "-run=TestMulticoreSDR" go-test-flags: "-run=TestMulticoreSDR"

.gitignore vendored
View File

@@ -6,7 +6,6 @@
/lotus-chainwatch /lotus-chainwatch
/lotus-shed /lotus-shed
/lotus-sim /lotus-sim
/lotus-provider
/lotus-townhall /lotus-townhall
/lotus-fountain /lotus-fountain
/lotus-stats /lotus-stats
@@ -42,7 +41,6 @@ build/paramfetch.sh
bin/ipget bin/ipget
bin/tmp/* bin/tmp/*
.idea .idea
.vscode
scratchpad scratchpad
build/builtin-actors/v* build/builtin-actors/v*

View File

@@ -14,7 +14,6 @@ linters:
- varcheck - varcheck
- deadcode - deadcode
- scopelint - scopelint
- unused
# We don't want to skip builtin/ # We don't want to skip builtin/
skip-dirs-use-default: false skip-dirs-use-default: false

View File

@@ -1,465 +1,34 @@
# Lotus changelog # Lotus changelog
# UNRELEASED # v 1.25.0-rc5 / 2023-11-16
## New features This is the fifth release candidate of the upcoming OPTIONAL release Lotus v1.25.0. This optional release also supports the Filecoin network version 21 upgrade, codenamed Watermelon 🍉, in addition to the numerous improvements and enhancements for node operators, ETH RPC-providers and storage providers.
## Improvements ## :exclamation: Mainnet Upgrade Date Update
# v1.26.2 / 2024-04-08 Mainnet upgrade date has been adjusted due to the rescope of nv21 & retesting on test network, as well as giving the community enough time to upgrade their infrastructure.
**This is a mandatory patch release for the Filecoin network version 22 mainnet upgrade, for all node operators.** **The new date and epoch for the Mainnet nv21 upgrade has therefore been set to epoch 3469380 - 2023-12-12T13:30:00Z.**
There is an update in the upgrade epoch for nv22, you can read the [full discussion in Slack here.](https://filecoinproject.slack.com/archives/C05P37R9KQD/p1712548103521969)
The new upgrade epoch is scheduled to be on **epoch `3855360 - 2024-04-24 - 14:00:00Z`**. That means: ### FIP0070 descoped for nv21
- **All mainnet node operators that have upgraded to v1.26.x must upgrade to this patch release before 2024-04-11T14:00:00Z.** [FIP0070: Allow SPs to move partitions between deadlines](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0070.md) was originally scoped for network version 21. However, during testing on calibrationnet, this FIP proved to be quite error prone to implement due to the complexity of the protocol, and a couple of bugs were discovered.
- **All mainnet node operators that are on a version lower than v1.26.x must upgrade to this patch release before 2024-04-24T14:00:00Z.**
This patch also includes fixes for node operators who want to index builtin-actor events after the nv22 upgrade. Specifically, it ensures the builtin actor event entries are ordered by insertion order when selected ([#11834](https://github.com/filecoin-project/lotus/pull/11834)). It also includes a couple of Lotus-Miner patch fixes, ensuring that SnapDeals works properly and uses the new ProveReplicaUpdate3 message after the network version 22 upgrade, that DDO sectors have the correct sector expirations, and that DDO sectors are visible in the `lotus-miner sectors list` cmd. As the path to resolving this bug is not yet clear, core devs have decided to descope FIP-0070 from the upcoming nv21 upgrade to ensure the security and stability of the network post upgrade.
## Upgrade Warnings
For users currently on a version of Lotus lower than v1.26.0, please note that **this release requires a minimum Go version of v1.21.7 or higher to successfully build Lotus.** ## Calibration WatermelonFix2
## v1.26.x Inclusions For the calibration network, the descoping of FIP0070 means that we will need to do another CodeCID migration to drop FIP0070 from nv21, and to get the test network into a state which will be similar to when the mainnet upgrades to nv21.
See the [v1.26.0](#v1260--2024-03-21) release notes below for inclusions and notes on the v1.26.x series. **This migration will happen at epoch 1108174 - 2023-11-21T13:00:00Z.**
* [v13 Builtin Actor Bundle](#v13-builtin-actor-bundle) Make sure to upgrade your calibration network nodes and storage providers to v1.24.0-rcx & ^ before this epoch.
* [Migration](#migration)
* [New features](#new-features-1)
* [Tracing API](#tracing-api)
* [Ethereum Tracing API (`trace_block` and `trace_replayBlockTransactions`)](#ethereum-tracing-api-trace_block-and-trace_replayblocktransactions)
* [GetActorEventsRaw and SubscribeActorEventsRaw](#getactoreventsraw-and-subscribeactoreventsraw)
* [Events Configuration Changes](#events-configuration-changes)
* [GetAllClaims and GetAllAllocations](#getallclaims-and-getallallocations)
* [Lotus CLI](#lotus-cli)
# v1.26.1 / 2024-03-27
***RETRACTED: Due to a change in network version 22 upgrade epoch, Lotus v1.26.1 should not be used prior to the new upgrade epoch. See v1.26.2 release notes above.***
**This is a patch release for the Calibration network user.** The Calibration network is scheduled for an upgrade to include the two additional built-in actor events to ease the transition and observability of DDO for the ecosystem ([#964](https://github.com/filecoin-project/FIPs/pull/964) and [#968](https://github.com/filecoin-project/FIPs/pull/968)).
The agreed-upon epoch between the Filecoin implementer team for the update is `1493854`, corresponding to `2024-04-03T11:00:00Z`. All Calibration network users need to upgrade to this patch release before that.
**Lotus Mainnet Users**: For users on the Mainnet, the [Lotus v1.26.0](https://github.com/filecoin-project/lotus/releases/tag/v1.26.0) release already includes the aforementioned events in preparation for the Mainnet nv22 upgrade. Therefore, both v1.26.0 and v1.26.1 versions are suitable for use on the Mainnet for the coming network version 22 upgrade.
# v1.26.0 / 2024-03-21
***RETRACTED: Due to a change in network version 22 upgrade epoch, Lotus v1.26.0 should not be used prior to the new upgrade epoch. See v1.26.2 release notes above.***
This is the stable release for the upcoming MANDATORY Filecoin network upgrade v22, codenamed Dragon 🐉, at `epoch 3817920 - 2024-04-11 - 14:00:00Z`
The Filecoin network version 22 delivers the following FIPs:
- [FIP-0063: Switching to new Drand mainnet network](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0063.md)
- [FIP-0074: Remove cron-based automatic deal settlement](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0074.md)
- [FIP-0076: Direct data onboarding](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0076.md)
- [FIP-0083: Add built-in Actor events in the Verified Registry, Miner and Market Actors](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0083.md)
## ☢️ Upgrade Warnings ☢️ ## ☢️ Upgrade Warnings ☢️
- This release requires a minimum Go version of v1.21.7 or higher to successfully build Lotus. - Please remove and clone a new Lotus repo (`git clone https://github.com/filecoin-project/lotus.git`) when upgrading to this version.
## v13 Builtin Actor Bundle
[Builtin actor v13.0.0](https://github.com/filecoin-project/builtin-actors/releases/tag/v13.0.0) is used for supporting this upgrade. Make sure that your lotus actor bundle matches the v13 actors manifest by running the following cli after upgrading:
```
lotus state actor-cids --network-version=22
Network Version: 22
Actor Version: 13
Manifest CID: bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e
Actor CID
account bafk2bzacedxnbtlsqdk76fsfmnhyvsblwyfducerwwtp3mqtx2wbrvs5idl52
cron bafk2bzacebbopddyn5csb3fsuhh2an4ttd23x6qnwixgohlirj5ahtcudphyc
datacap bafk2bzaceah42tfnhd7xnztawgf46gbvc3m2gudoxshlba2ucmmo2vy67t7ci
eam bafk2bzaceb23bhvvcjsth7cn7vp3gbaphrutsaz7v6hkls3ogotzs4bnhm4mk
ethaccount bafk2bzaceautge6zhuy6jbj3uldwoxwhpywuon6z3xfvmdbzpbdribc6zzmei
evm bafk2bzacedq6v2lyuhgywhlllwmudfj2zufzcauxcsvvd34m2ek5xr55mvh2q
init bafk2bzacedr4xacm3fts4vilyeiacjr2hpmwzclyzulbdo24lrfxbtau2wbai
multisig bafk2bzacecr5zqarfqak42xqcfeulsxlavcltawsx2fvc7zsjtby6ti4b3wqc
paymentchannel bafk2bzacebntdhfmyc24e7tm52ggx5tnw4i3hrr3jmllsepv3mibez4hywsa2
placeholder bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro
reward bafk2bzacedq4q2kwkruu4xm7rkyygumlbw2yt4nimna2ivea4qarvtkohnuwu
storagemarket bafk2bzacebjtoltdviyznpj34hh5qp6u257jnnbjole5rhqfixm7ug3epvrfu
storageminer bafk2bzacebf4rrqyk7gcfggggul6nfpzay7f2ordnkwm7z2wcf4mq6r7i77t2
storagepower bafk2bzacecjy4dkulvxppg3ocbmeixe2wgg6yxoyjxrm4ko2fm3uhpvfvam6e
system bafk2bzacecyf523quuq2kdjfdvyty446z2ounmamtgtgeqnr3ynlu5cqrlt6e
verifiedregistry bafk2bzacedkxehp7y7iyukbcje3wbpqcvufisos6exatkanyrbotoecdkrbta
```
## Migration
We are expecting a bit heavier than normal state migration for this upgrade due to the amount of state changes introduced with Direct Data Onboarding.
All node operators, including storage providers, should be aware that ONE pre-migration is being scheduled 120 epochs before the upgrade. It will take around 10-20 minutes for the pre-migration and less than 30 seconds for the final migration, depending on the amount of historical state in the node blockstore and the hardware specs the node is running on. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries
We recommend node operators (who haven't enabled splitstore discard mode) that do not care about historical chain states, to prune the chain blockstore by syncing from a snapshot 1-2 days before the upgrade.
You can test out the migration by running the [`benchmarking a network migration` tutorial.](https://lotus.filecoin.io/kb/test-migration/)
For certain node operators, such as full archival nodes or systems that need to keep large amounts of state (RPC providers), completing the pre-migration in time before the network upgrade might not be achievable. For those node operators, it is recommended to skip the pre-migration and run the non-cached migration (i.e., just running the migration at the exact upgrade epoch), and schedule for some downtime during the upgrade epoch. Operators of such nodes can read the [`How to disable premigration in network upgrade` tutorial.](https://lotus.filecoin.io/kb/disable-premigration/)
## New features
- feat: api: new verified registry methods to get all allocations and claims (#11631) ([filecoin-project/lotus#11631](https://github.com/filecoin-project/lotus/pull/11631))
- feat: sealing: Support nv22 DDO features in the sealing pipeline (#11226) ([filecoin-project/lotus#11226](https://github.com/filecoin-project/lotus/pull/11226))
- feat: implement FIP-0063 ([filecoin-project/lotus#11572](https://github.com/filecoin-project/lotus/pull/11572))
- feat: events: Add Lotus APIs to consume smart contract and built-in actor events ([filecoin-project/lotus#11618](https://github.com/filecoin-project/lotus/pull/11618))
### Tracing API
Replace the `CodeCid` field in the message trace (added in 1.23.4) with an `InvokedActor` field.
**Before:**
```javascript
{
"Msg": {
"From": ...,
"To": ...,
...
"CodeCid": ... // The actor's code CID.
}
"MsgRct": ...,
"GasCharges": [],
"Subcalls": [],
}
```
**After:**
```javascript
{
"Msg": {
"From": ...,
"To": ...
}
"InvokedActor": { // The invoked actor (ommitted if the actor wasn't invoked).
"Id": 1234, // The ID of the actor.
"State": { // The actor's state object (may change between network versions).
"Code": ..., // The actor's code CID.
"Head": ..., // The actor's state-root (when invoked).
"CallSeqNum": ..., // The actor's nonce.
"Balance": ..., // The actor's balance (when invoked).
"Address": ..., // Delegated address (FEVM only).
}
}
"MsgRct": ...,
"GasCharges": [],
"Subcalls": [],
}
```
This means the trace now contains an accurate "snapshot" of the actor at the time of the call, information that may not be present in the final state-tree (e.g., due to reverts). This will hopefully improve the performance and accuracy of indexing services.
### Ethereum Tracing API (`trace_block` and `trace_replayBlockTransactions`)
For those with the Ethereum JSON-RPC API enabled, the experimental Ethereum Tracing API has been improved significantly and should be considered "functional". However, it's still new and should be tested extensively before relying on it. This API translates FVM traces to Ethereum-style traces, implementing the OpenEthereum `trace_block` and `trace_replayBlockTransactions` APIs.
This release fixes numerous bugs with this API and now ABI-encodes non-EVM inputs/outputs as if they were explicit EVM calls to [`handle_filecoin_method`][handlefilecoinmethod] for better block explorer compatibility.
However, there are some _significant_ limitations:
1. The Geth APIs are not implemented, only the OpenEthereum (Erigon, etc.) APIs.
2. Block rewards are not (yet) included in the trace.
3. Selfdestruct operations are not included in the trace.
4. EVM smart contract "create" events always specify `0xfe` as the "code" for newly created EVM smart contracts.
Additionally, Filecoin is not Ethereum no matter how much we try to provide API/tooling compatibility. This API attempts to translate Filecoin semantics into Ethereum semantics as accurately as possible, but it's hardly the best source of data unless you _need_ Filecoin to look like an Ethereum compatible chain. If you're trying to build a new integration with Filecoin, please use the native `StateCompute` method instead.
[handlefilecoinmethod]: https://fips.filecoin.io/FIPS/fip-0054.html#handlefilecoinmethod-general-handler-for-method-numbers--1024
### GetActorEventsRaw and SubscribeActorEventsRaw
[FIP-0049](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0049.md) introduced _Actor Events_ that can be emitted by user programmed actors. [FIP-0083](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0083.md) introduces new events emitted by the builtin Verified Registry, Miner and Market Actors. These new events for builtin actors are being activated with network version 22 to coincide with _Direct Data Onboarding_ as defined in [FIP-0076](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0076.md) which introduces additional flexibility for data onboarding. Sector, Deal and DataCap lifecycles can be tracked with these events, providing visibility and options for programmatic responses to changes in state.
Actor events are available on message receipts, but can now be retrieved from a node using the new `GetActorEventsRaw` and `SubscribeActorEventsRaw` methods. These methods allow for querying and subscribing to actor events, respectively. They depend on the Lotus node both collecting events (with `Fevm.Events.RealTimeFilterAPI` and `Fevm.Events.HistoricFilterAPI`) and being enabled with the new configuration option `Events.EnableActorEventsAPI`. Note that a Lotus node can only respond to requests for historic events that it retains in its event store.
Both `GetActorEventsRaw` and `SubscribeActorEventsRaw` take a filter parameter which can optionally filter events on:
* `Addresses` of the actor(s) emitting the event
* Specific `Fields` within the event
* `FromHeight` and `ToHeight` to filter events by block height
* `TipSetKey` to restrict events contained within a specific tipset
`GetActorEventsRaw` provides a one-time query for actor events, while `SubscribeActorEventsRaw` provides a long-lived connection (via websockets) to the Lotus node, allowing for real-time updates on actor events. The subscription can be cancelled by the client at any time.
A future Lotus release may include `GetActorEvents` and `SubscribeActorEvents` methods which will provide a more user-friendly interface to actor events, including deserialization of event data.
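For illustration only, here is a minimal sketch of calling `GetActorEventsRaw` over the node's JSON-RPC endpoint with a height-bounded filter. The endpoint URL, the auth handling, and the JSON field names of the filter object are assumptions for this sketch (they mirror the filter options listed above but are not quoted from the API); `Filecoin.` is the usual Lotus JSON-RPC namespace.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Assumed local Lotus API endpoint; adjust host, port and auth token for your node.
	url := "http://127.0.0.1:1234/rpc/v1"

	req := map[string]any{
		"jsonrpc": "2.0",
		"id":      1,
		"method":  "Filecoin.GetActorEventsRaw",
		// One filter object; the key names here are assumptions mirroring the
		// FromHeight/ToHeight options described above.
		"params": []any{map[string]any{
			"fromHeight": 3855000,
			"toHeight":   3855100,
		}},
	}

	body, err := json.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // raw actor events, or an error if the API is not enabled
}
```

Remember that the node must have `Events.EnableActorEventsAPI` enabled and can only serve historic events it has retained in its event store.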
### Events Configuration Changes
All configuration options previously under `Fevm.Events` are now in the top-level `Events` section along with the new `Events.EnableActorEventsAPI` option mentioned above. If you have non-default options in `[Events]` under `[Fevm]` in your configuration file, please move them to the top-level `[Events]`.
While `Fevm.Events.*` options are deprecated and replaced by `Events.*`, any existing custom values will be respected if their new form isn't set, but a warning will be printed to standard error upon startup. Support for these deprecated options will be removed in a future Lotus release, so please migrate your configuration promptly.
### GetAllClaims and GetAllAllocations
Additionally, the methods `GetAllAllocations` and `GetAllClaims` have been added to the Lotus API. These methods list all the allocations and claims available in the actor state.
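A minimal sketch of querying these over JSON-RPC, assuming the on-the-wire method names follow the usual `Filecoin.State*` convention (`StateGetAllAllocations` / `StateGetAllClaims`) and that an empty tipset key selects the chain head; both details are assumptions, not stated in this changelog:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
)

// call posts a single JSON-RPC request and streams the raw response to stdout.
func call(url, method string, params ...any) {
	req := map[string]any{"jsonrpc": "2.0", "id": 1, "method": method, "params": params}
	body, _ := json.Marshal(req)
	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Printf("%s:\n", method)
	io.Copy(os.Stdout, resp.Body)
	fmt.Println()
}

func main() {
	url := "http://127.0.0.1:1234/rpc/v1" // assumed local Lotus endpoint

	// An empty tipset key ([]) is assumed here to mean "use the current head".
	call(url, "Filecoin.StateGetAllAllocations", []any{})
	call(url, "Filecoin.StateGetAllClaims", []any{})
}
```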
### Lotus CLI
The `filplus` commands used for listing allocations and claims have been updated. If no argument is provided to either command, they will list all the allocations and claims in the verified registry actor.
The output list columns have been modified to `AllocationID` and `ClaimID` instead of ID.
```shell
lotus filplus list-allocations --help
NAME:
lotus filplus list-allocations - List allocations available in verified registry actor or made by a client if specified
USAGE:
lotus filplus list-allocations [command options] clientAddress
OPTIONS:
--expired list only expired allocations (default: false)
--json output results in json format (default: false)
--help, -h show help
lotus filplus list-claims --help
NAME:
lotus filplus list-claims - List claims available in verified registry actor or made by provider if specified
USAGE:
lotus filplus list-claims [command options] providerAddress
OPTIONS:
--expired list only expired claims (default: false)
--help, -h show help
```
## Dependencies
- github.com/filecoin-project/go-state-types (v0.12.8 -> v0.13.1)
- chore: deps: update to go-state-types v13.0.0-rc.1 ([filecoin-project/lotus#11662](https://github.com/filecoin-project/lotus/pull/11662))
- chore: deps: update to go-state-types v13.0.0-rc.2 ([filecoin-project/lotus#11675](https://github.com/filecoin-project/lotus/pull/11675))
- chore: deps: update to go-multiaddr v0.12.2 (#11602) ([filecoin-project/lotus#11602](https://github.com/filecoin-project/lotus/pull/11602))
- feat: fvm: update the FVM/FFI to v4.1 (#11608) (#11612) ([filecoin-project/lotus#11612](https://github.com/filecoin-project/lotus/pull/11612))
- chore: deps: update builtin-actors, GST, verified claims tests ([filecoin-project/lotus#11768](https://github.com/filecoin-project/lotus/pull/11768))
## Others
- Remove PL operated bootstrap nodes from mainnet.pi ([filecoin-project/lotus#11491](https://github.com/filecoin-project/lotus/pull/11491))
- Update epoch heights (#11637) ([filecoin-project/lotus#11637](https://github.com/filecoin-project/lotus/pull/11637))
- chore: Set upgrade heights and change codename ([filecoin-project/lotus#11599](https://github.com/filecoin-project/lotus/pull/11599))
- chore: backport #11609 to the feat/nv22 branch (#11644) ([filecoin-project/lotus#11644](https://github.com/filecoin-project/lotus/pull/11644))
- fix: add UpgradePhoenixHeight to StateGetNetworkParams (#11648) ([filecoin-project/lotus#11648](https://github.com/filecoin-project/lotus/pull/11648))
- feat: drand quicknet: allow scheduling drand quicknet upgrade before nv22 on 2k devnet ([filecoin-project/lotus#11667](https://github.com/filecoin-project/lotus/pull/11667))
- chore: backport #11632 to release/v1.26.0 ([filecoin-project/lotus#11667](https://github.com/filecoin-project/lotus/pull/11667))
- release: bump to v1.26.0-rc2 ([filecoin-project/lotus#11691](https://github.com/filecoin-project/lotus/pull/11691))
- Docs: Drand: document the meaning of "IsChained" ([filecoin-project/lotus#11692](https://github.com/filecoin-project/lotus/pull/11692))
- chore: remove old calibnet bootstrappers ([filecoin-project/lotus#11702](https://github.com/filecoin-project/lotus/pull/11702))
- chore: Add lotus-provider to build to match install ([filecoin-project/lotus#11616](https://github.com/filecoin-project/lotus/pull/11616))
- new: add forest bootstrap nodes (#11636) ([filecoin-project/lotus#11636](https://github.com/filecoin-project/lotus/pull/11636))
# v1.25.2 / 2024-01-11
This is an optional but **highly recommended feature release** of Lotus, as it includes fixes for synchronization issues that users have experienced. The feature release also introduces `Lotus-Provider` in its alpha testing phase, as well as the ability to call external PC2-binaries during the sealing process.
## ☢️ Upgrade Warnings ☢️
There are no upgrade warnings for this feature release.
## ⭐️ Highlights ⭐️
### Lotus-Provider
The feature release ships the alpha release of the new Lotus-Provider binary, together with its initial features - High Availability of WindowPoSt and WinningPoSt.
So what is so exciting about Lotus-Provider:
**High Availability**
- You can run as many `Lotus-Provider` instances as you want for both WindowPoSt and WinningPOSt.
- You can connect them to as many clustered Yugabyte instances as you want to. This allows for an NxN configuration where all instances can communicate with all others.
- You have the option to connect different instances to different chain daemons.
**Simplicity**
- Once the configuration is in the database, setting up a new machine with Lotus-Provider is straightforward. Simply start the binary with the correct flags to find YugabyteDB and specify which configuration layers it should use.
**Durability**
- `Lotus-Provider` is designed with robustness in mind. Updates to the system are handled seamlessly, ensuring that performance and stability are maintained when taking down machines in your cluster for updates.
Read more about [`Lotus-Provider` in the documentation here](https://lotus.filecoin.io/storage-providers/lotus-provider/overview/). And check out how you can migrate from [Lotus-Miner to Lotus-Provider here](https://lotus.filecoin.io/storage-providers/lotus-provider/setup/). **(Only recommended on testnets while it's in Alpha)**
### External PC2-binaries
In this feature release, storage providers can call external PC2-binaries during the sealing process. This allows storage providers to leverage the SupraSeal PC2 binary, which has been shown to improve sealing speed in the PC2-phase. For instance, our current benchmarks show that an NVIDIA RTX A5000 card was able to complete PC2 in approximately 2.5 minutes.
We have verified that SupraSeal PC2 functions properly with Committed Capacity (CC) sectors, both SyntheticPoReps and non-Synthetic PoReps. However calling SupraSeal PC2 with deal sectors is not supported in this feature release.
For more information on how to use SupraSeal PC2 with your `lotus-worker`, as well as how to use the feature, please [refer to the documentation](https://lotus.filecoin.io/tutorials/lotus-miner/supra-seal-pc2/).
## New features
- feat: sturdypost work branch ([filecoin-project/lotus#11405](https://github.com/filecoin-project/lotus/pull/11405))
- Adds the `Lotus-Provider` binary, and the HarmonyDB framework.
- feat: worker: Support delegating precommit2 to external binary ([filecoin-project/lotus#11185](https://github.com/filecoin-project/lotus/pull/11185))
- Allows for delegating PreCommit2 to an external binary.
- feat: build: Add SupraSeal-PC2 binary script ([filecoin-project/lotus#11430](https://github.com/filecoin-project/lotus/pull/11430))
- Adds a script for building the SupraSeal-PC2 binary easily.
- Feat: daemon: Auto remove existing chain if importing chain file or snapshot ([filecoin-project/lotus#11277](https://github.com/filecoin-project/lotus/pull/11277))
- Auto removes the existing chain when importing a snapshot.
- feat: Add ETA to lotus sync wait (#11211) ([filecoin-project/lotus#11211](https://github.com/filecoin-project/lotus/pull/11211))
- Adds an ETA indicator to `lotus sync wait`, so you can get an estimate of how long until sync is completed.
- feat: mpool/wdpost: Maximize feecap config ([filecoin-project/lotus#9746](https://github.com/filecoin-project/lotus/pull/9746))
- Adds a Maximize FeeCap config
- feat: Add lotus-bench cli option to stress test any binary ([filecoin-project/lotus#11270](https://github.com/filecoin-project/lotus/pull/11270))
- Enables `Lotus-Bench` to run any binary and analyze its latency and histogram distribution, track the most common errors, and perform stress testing under different concurrency levels to see how it behaves under different QPS.
- feat: chain import: don't walk to genesis - 2-3x faster snapshot import (#11446) ([filecoin-project/lotus#11446](https://github.com/filecoin-project/lotus/pull/11446))
- Improves Snapshot import speed, by not walking back to genesis on import.
- feat: metric: export Mpool message count ([filecoin-project/lotus#11361](https://github.com/filecoin-project/lotus/pull/11361))
- Adds the mpool count as a prometheus metric.
- feat: bench: flag to output GenerateWinningPoStWithVanilla params ([filecoin-project/lotus#11460](https://github.com/filecoin-project/lotus/pull/11460))
## Improvements
- feat: bootstrap: add glif bootstrap node on calibration ([filecoin-project/lotus#11175](https://github.com/filecoin-project/lotus/pull/11175))
- fix: bench: Set ticket and seed to a non-all zero value ([filecoin-project/lotus#11429](https://github.com/filecoin-project/lotus/pull/11429))
- fix: alert: Check UDPbuffer-size ([filecoin-project/lotus#11360](https://github.com/filecoin-project/lotus/pull/11360))
- feat: cli: sort actor CIDs alphabetically before printing (#11345) ([filecoin-project/lotus#11345](https://github.com/filecoin-project/lotus/pull/11345))
- fix: worker: Connect when --listen is not set ([filecoin-project/lotus#11294](https://github.com/filecoin-project/lotus/pull/11294))
- fix: miner info: Show correct sector state counts ([filecoin-project/lotus#11456](https://github.com/filecoin-project/lotus/pull/11456))
- feat: miner: defensive check for equivocation ([filecoin-project/lotus#11321](https://github.com/filecoin-project/lotus/pull/11321))
- feat: Instructions for setting up Grafana/Prometheus for monitoring local lotus node ([filecoin-project/lotus#11276](https://github.com/filecoin-project/lotus/pull/11276))
- fix: cli: Wrap error in wallet sign ([filecoin-project/lotus#11273](https://github.com/filecoin-project/lotus/pull/11273))
- fix: Add time slicing to splitstore purging to reduce lock congestion ([filecoin-project/lotus#11269](https://github.com/filecoin-project/lotus/pull/11269))
- feat: sealing: load SectorsSummary from sealing SectorStats instead of calling API each time ([filecoin-project/lotus#11353](https://github.com/filecoin-project/lotus/pull/11353))
- fix: shed: additional metrics in `mpool miner-select-messages` ([filecoin-project/lotus#11253](https://github.com/filecoin-project/lotus/pull/11253))
- storage: Return soft err when sector alloc fails in acquire ([filecoin-project/lotus#11338](https://github.com/filecoin-project/lotus/pull/11338))
- feat: miner: log detailed timing breakdown when mining takes longer than the block's timestamp ([filecoin-project/lotus#11228](https://github.com/filecoin-project/lotus/pull/11228))
- fix: shed: make invariants checker work with splitstore ([filecoin-project/lotus#11391](https://github.com/filecoin-project/lotus/pull/11391))
- feat: eth: encode eth tx input as solidity ABI (#11402) ([filecoin-project/lotus#11402](https://github.com/filecoin-project/lotus/pull/11402))
- fix: eth: use the correct state-tree when resolving addresses (#11387) ([filecoin-project/lotus#11387](https://github.com/filecoin-project/lotus/pull/11387))
- fix: eth: remove trace sanity check (#11385) ([filecoin-project/lotus#11385](https://github.com/filecoin-project/lotus/pull/11385))
- fix: chain: make failure to load the chain state fatal (#11426) ([filecoin-project/lotus#11426](https://github.com/filecoin-project/lotus/pull/11426))
- fix: build: an epoch is near an upgrade iff the upgrade is enabled (#11401) ([filecoin-project/lotus#11401](https://github.com/filecoin-project/lotus/pull/11401))
- fix: eth: handle unresolvable addresses (#11433) ([filecoin-project/lotus#11433](https://github.com/filecoin-project/lotus/pull/11433))
- fix: eth: correctly encode and simplify native input/output encoding (#11382) ([filecoin-project/lotus#11382](https://github.com/filecoin-project/lotus/pull/11382))
- fix: worker: listen for interrupt signals in GetStorageMinerAPI loop (#11309) ([filecoin-project/lotus#11309](https://github.com/filecoin-project/lotus/pull/11309))
- fix: sync: iterate over returned messages directly (#11373) ([filecoin-project/lotus#11373](https://github.com/filecoin-project/lotus/pull/11373))
- fix: miner: correct duration logs in mineOne ([filecoin-project/lotus#11241](https://github.com/filecoin-project/lotus/pull/11241))
- fix: cli: Add print to unseal cmd ([filecoin-project/lotus#11271](https://github.com/filecoin-project/lotus/pull/11271))
- fix: networking: avoid dialing when trying to handshake peers ([filecoin-project/lotus#11262](https://github.com/filecoin-project/lotus/pull/11262))
- metric milliseconds computation with golang original method (#11403) ([filecoin-project/lotus#11403](https://github.com/filecoin-project/lotus/pull/11403))
- feat: shed: fix blockstore prune (#11197) ([filecoin-project/lotus#11197](https://github.com/filecoin-project/lotus/pull/11197))
- refactor:ffi: replace ClearLayerData with ClearCache (#11352) ([filecoin-project/lotus#11352](https://github.com/filecoin-project/lotus/pull/11352))
- fix: api: compute gasUsedRatio based on max gas in the tipset (#11354) ([filecoin-project/lotus#11354](https://github.com/filecoin-project/lotus/pull/11354))
- fix: api: compute the effective gas cost with the correct base-fee (#11357) ([filecoin-project/lotus#11357](https://github.com/filecoin-project/lotus/pull/11357))
- fix: api: return errors on failure to lookup an eth txn receipt (#11329) ([filecoin-project/lotus#11329](https://github.com/filecoin-project/lotus/pull/11329))
- fix: api: exclude reverted events in `eth_getLogs` results (#11318) ([filecoin-project/lotus#11318](https://github.com/filecoin-project/lotus/pull/11318))
- api: Add block param to eth_estimateGas ([filecoin-project/lotus#11462](https://github.com/filecoin-project/lotus/pull/11462))
- opt: fix duplicate check exitcode ([filecoin-project/lotus#11171](https://github.com/filecoin-project/lotus/pull/11171))
- fix: lotus-provider: show addresses in log ([filecoin-project/lotus#11490](https://github.com/filecoin-project/lotus/pull/11490))
- fix: lotus-provider: Wait for the correct taskID ([filecoin-project/lotus#11493](https://github.com/filecoin-project/lotus/pull/11493))
- harmony: Fix task reclaim on restart ([filecoin-project/lotus#11498](https://github.com/filecoin-project/lotus/pull/11498))
- fix: lotus-provider: Fix log output format in wdPostTaskCmd ([filecoin-project/lotus#11504](https://github.com/filecoin-project/lotus/pull/11504))
- fix: lp docsgen ([filecoin-project/lotus#11488](https://github.com/filecoin-project/lotus/pull/11488))
- fix: lotus-provider do not suggest default layer ([filecoin-project/lotus#11486](https://github.com/filecoin-project/lotus/pull/11486))
- feat: syncer: optimize syncFork for one-epoch forks ([filecoin-project/lotus#11533](https://github.com/filecoin-project/lotus/pull/11533))
- fix: sync: do not include incoming in return of syncFork ([filecoin-project/lotus#11541](https://github.com/filecoin-project/lotus/pull/11541))
- fix: wdpost: fix vanilla proof indexes ([filecoin-project/lotus#11550](https://github.com/filecoin-project/lotus/pull/11550))
- feat: exchange: change GetBlocks to always fetch the requested number of tipsets ([filecoin-project/lotus#11565](https://github.com/filecoin-project/lotus/pull/11565))
## Dependencies
- update go-libp2p to v0.31.0 ([filecoin-project/lotus#11225](https://github.com/filecoin-project/lotus/pull/11225))
- deps: gostatetype (#11437) ([filecoin-project/lotus#11437](https://github.com/filecoin-project/lotus/pull/11437))
- fix: deps: stop using go-libp2p deprecated peer.ID.Pretty ([filecoin-project/lotus#11263](https://github.com/filecoin-project/lotus/pull/11263))
- chore:libp2p:update libp2p deps in release-v1.25.2 to v0.31.1 ([filecoin-project/lotus#11524](https://github.com/filecoin-project/lotus/pull/11524))
- deps: update go-multiaddr to v0.12.0 ([filecoin-project/lotus#11524](https://github.com/filecoin-project/lotus/pull/11558))
- dep: go-multi-address to v0.12.1 ([filecoin-project/lotus#11564](https://github.com/filecoin-project/lotus/pull/11564))
## Others
- chore: update FFI (#11431) ([filecoin-project/lotus#11431](https://github.com/filecoin-project/lotus/pull/11431))
- chore: build: bump master to v1.25.1-dev ([filecoin-project/lotus#11450](https://github.com/filecoin-project/lotus/pull/11450))
- chore: releases :merge releases into master ([filecoin-project/lotus#11448](https://github.com/filecoin-project/lotus/pull/11448))
- chore: actors: update v12 to the final release ([filecoin-project/lotus#11440](https://github.com/filecoin-project/lotus/pull/11440))
- chore: Remove ipfs main bootstrap nodes (#11200) ([filecoin-project/lotus#11200](https://github.com/filecoin-project/lotus/pull/11200))
- Remove PL's european bootstrap nodes from mainnet.pi ([filecoin-project/lotus#11315](https://github.com/filecoin-project/lotus/pull/11315))
- chore: deps: update to go-state-types v0.12.7 ([filecoin-project/lotus#11428](https://github.com/filecoin-project/lotus/pull/11428))
- fix: Add .vscode to gitignore ([filecoin-project/lotus#11275](https://github.com/filecoin-project/lotus/pull/11275))
- fix: test: temporarily exempt SynthPorep constants from test ([filecoin-project/lotus#11259](https://github.com/filecoin-project/lotus/pull/11259))
- feat: skip TestSealAndVerify3 until it's fixed ([filecoin-project/lotus#11230](https://github.com/filecoin-project/lotus/pull/11230))
- Update RELEASE_ISSUE_TEMPLATE.md ([filecoin-project/lotus#11250](https://github.com/filecoin-project/lotus/pull/11250))
- fix: config: Update ColdStoreType comments ([filecoin-project/lotus#11274](https://github.com/filecoin-project/lotus/pull/11274))
- readme: bump up golang version (#11347) ([filecoin-project/lotus#11347](https://github.com/filecoin-project/lotus/pull/11347))
- chore: watermelon: upgrade epoch ([filecoin-project/lotus#11374](https://github.com/filecoin-project/lotus/pull/11374))
- add support for v12 check invariants and also a default case to reduce future confusion (#11371) ([filecoin-project/lotus#11371](https://github.com/filecoin-project/lotus/pull/11371))
- test: drand: switch tests to drand testnet (from devnet) (#11359) ([filecoin-project/lotus#11359](https://github.com/filecoin-project/lotus/pull/11359))
- feat: chain: light-weight patch to fix calibrationnet again by removing move_partitions from built-in actors (#11409) ([filecoin-project/lotus#11409](https://github.com/filecoin-project/lotus/pull/11409))
- chore: cli: Revert move-partitions cmd ([filecoin-project/lotus#11408](https://github.com/filecoin-project/lotus/pull/11408))
- chore: forward-port calibnet hotfix to master ([filecoin-project/lotus#11407](https://github.com/filecoin-project/lotus/pull/11407))
- fix: migration: set premigration to 90 minutes ([filecoin-project/lotus#11395](https://github.com/filecoin-project/lotus/pull/11395))
- feat: chain: light-weight patch to fix calibrationnet (#11363) ([filecoin-project/lotus#11363](https://github.com/filecoin-project/lotus/pull/11363))
- chore: merge feat/nv21 into master ([filecoin-project/lotus#11336](https://github.com/filecoin-project/lotus/pull/11336))
- docs: Link the release section in the release flow doc ([filecoin-project/lotus#11299](https://github.com/filecoin-project/lotus/pull/11299))
- fix: ci: fetch params for the storage unit tests ([filecoin-project/lotus#11441](https://github.com/filecoin-project/lotus/pull/11441))
- Update mainnet.pi ([filecoin-project/lotus#11288](https://github.com/filecoin-project/lotus/pull/11288))
- add go linter - "unused" (#11235) ([filecoin-project/lotus#11235](https://github.com/filecoin-project/lotus/pull/11235))
- Fix/texts (#11298) ([filecoin-project/lotus#11298](https://github.com/filecoin-project/lotus/pull/11298))
- fix typo in rate-limit flag description (#11316) ([filecoin-project/lotus#11316](https://github.com/filecoin-project/lotus/pull/11316))
- eth_filter flake debug ([filecoin-project/lotus#11261](https://github.com/filecoin-project/lotus/pull/11261))
- fix: sealing: typo in FinalizeReplicaUpdate ([filecoin-project/lotus#11255](https://github.com/filecoin-project/lotus/pull/11255))
- chore: slice loop replace (#11349) ([filecoin-project/lotus#11349](https://github.com/filecoin-project/lotus/pull/11349))
- backport: docker build fix for v1.25.2 ([filecoin-project/lotus#11560](https://github.com/filecoin-project/lotus/pull/11560))
## Contributors
| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| Andrew Jackson (Ajax) | 161 | +24328/-12464 | 4148 |
| Łukasz Magiera | 99 | +5238/-2690 | 260 |
| Shrenuj Bansal | 27 | +3402/-1265 | 76 |
| Fridrik Asmundsson | 15 | +1148/-307 | 58 |
| Steven Allen | 15 | +674/-337 | 35 |
| Ian Norden | 1 | +625/-3 | 4 |
| Aarsh Shah | 4 | +227/-167 | 14 |
| Phi | 19 | +190/-183 | 32 |
| Aayush Rajasekaran | 3 | +291/-56 | 16 |
| Mikers | 2 | +76/-262 | 19 |
| Aayush | 14 | +111/-59 | 21 |
| Friðrik Ásmundsson | 1 | +101/-1 | 2 |
| Alejandro Criado-Pérez | 1 | +36/-36 | 27 |
| Jie Hou | 5 | +36/-10 | 5 |
| Florian RUEN | 2 | +24/-19 | 5 |
| Phi-rjan | 3 | +20/-8 | 3 |
| Icarus9913 | 1 | +11/-11 | 6 |
| Jiaying Wang | 3 | +8/-7 | 5 |
| guangwu | 1 | +3/-10 | 2 |
| Marten Seemann | 1 | +6/-6 | 2 |
| simlecode | 1 | +0/-6 | 2 |
| GlacierWalrus | 2 | +0/-5 | 2 |
| Anton Evangelatov | 1 | +2/-2 | 1 |
| Ales Dumikau | 3 | +2/-2 | 3 |
| renran | 1 | +2/-1 | 1 |
| Volker Mische | 1 | +1/-1 | 1 |
| Icarus Wu | 1 | +1/-1 | 1 |
| Hubert | 1 | +1/-1 | 1 |
| Aloxaf | 1 | +1/-1 | 1 |
| Alejandro | 1 | +1/-1 | 1 |
| lazavikmaria | 1 | +1/-0 | 1 |
# v1.25.1 / 2023-12-09
This is a **highly recommended PATCH RELEASE.** The patch release fixes the issue where node operators trying to catch up sync were unable to sync large message blocks/epochs due to an increased number of messages on the network.
This patch release allows for up to 10k messages per block. Additionally, it introduces a limit on the amount of data that can be read at once, ensuring the system can handle worst-case scenarios.
## Improvements
- fix: exchange: allow up to 10k messages per block ([filecoin-project/lotus#11506](https://github.com/filecoin-project/lotus/pull/11506))
# v 1.25.0 / 2023-11-22
This is a highly recommended feature release of Lotus. This optional release supports the Filecoin network version 21 upgrade, codenamed Watermelon 🍉, in addition to the numerous improvements and enhancements for node operators, ETH RPC-providers and storage providers.
**The Filecoin network upgrade v21, codenamed Watermelon 🍉, is at epoch 3469380 - 2023-12-12T13:30:00Z**
The full list of [protocol improvements delivered in the network upgrade can be found here](https://github.com/filecoin-project/core-devs/blob/master/Network%20Upgrades/v21.md).
## ☢️ Upgrade Warnings ☢️
- Read through the [changelog of the mandatory v1.24.0 release](https://github.com/filecoin-project/lotus/releases/tag/v1.24.0). Especially the `Migration` and `v12 Builtin Actor Bundle` sections.
- Please remove and clone a new Lotus repo (`git clone https://github.com/filecoin-project/lotus.git`) when upgrading to this release.
- This feature release requires a minimum Go version of v1.20.7 or higher to successfully build Lotus. Go version 1.21.x is not supported yet. - This feature release requires a minimum Go version of v1.20.7 or higher to successfully build Lotus. Go version 1.21.x is not supported yet.
- EthRPC providers, please check out the [new tracing API to Lotus RPC](https://github.com/filecoin-project/lotus/pull/11100) - EthRPC providers, please check out the [new tracing API to Lotus RPC](https://github.com/filecoin-project/lotus/pull/11100)
@@ -532,9 +101,6 @@ Lotus-workers can now be built to leverage the SupraSeal C2 sealing optimization
- fix(client): single-root error message ([filecoin-project/lotus#11214](https://github.com/filecoin-project/lotus/pull/11214)) - fix(client): single-root error message ([filecoin-project/lotus#11214](https://github.com/filecoin-project/lotus/pull/11214))
- fix: worker: Convert `DC_[SectorSize]_[ResourceRestriction]` if set ([filecoin-project/lotus#11224](https://github.com/filecoin-project/lotus/pull/11224)) - fix: worker: Convert `DC_[SectorSize]_[ResourceRestriction]` if set ([filecoin-project/lotus#11224](https://github.com/filecoin-project/lotus/pull/11224))
- chore: backport #11338 onto release/v1.25.0 ([filecoin-project/lotus#11350](https://github.com/filecoin-project/lotus/pull/11350)) - chore: backport #11338 onto release/v1.25.0 ([filecoin-project/lotus#11350](https://github.com/filecoin-project/lotus/pull/11350))
- fix: lotus-provider: lotus-provider msg sending ([filecoin-project/lotus#11480](https://github.com/filecoin-project/lotus/pull/11480))
- fix: lotus-provider: Fix winning PoSt ([filecoin-project/lotus#11483](https://github.com/filecoin-project/lotus/pull/11483))
- chore: fix: sql Scan cannot write to an object ([filecoin-project/lotus#11487](https://github.com/filecoin-project/lotus/pull/11487))
- fix: Exclude reverted events in `eth_getLogs` results [filecoin-project/lotus#11318](https://github.com/filecoin-project/lotus/pull/11318) - fix: Exclude reverted events in `eth_getLogs` results [filecoin-project/lotus#11318](https://github.com/filecoin-project/lotus/pull/11318)
## Dependencies ## Dependencies
@@ -544,8 +110,6 @@ Lotus-workers can now be built to leverage the SupraSeal C2 sealing optimization
- fix: build: use tagged releases ([filecoin-project/lotus#11194](https://github.com/filecoin-project/lotus/pull/11194)) - fix: build: use tagged releases ([filecoin-project/lotus#11194](https://github.com/filecoin-project/lotus/pull/11194))
- chore: test-vectors: update ([filecoin-project/lotus#11196](https://github.com/filecoin-project/lotus/pull/11196)) - chore: test-vectors: update ([filecoin-project/lotus#11196](https://github.com/filecoin-project/lotus/pull/11196))
- chore: backport #11365 to release/v1.25.0 ([filecoin-project/lotus#11369](https://github.com/filecoin-project/lotus/pull/11369)) - chore: backport #11365 to release/v1.25.0 ([filecoin-project/lotus#11369](https://github.com/filecoin-project/lotus/pull/11369))
- chore: deps: update to go-state-types v0.12.8 ([filecoin-project/lotus#11339](https://github.com/filecoin-project/lotus/pull/11437))
- chore: deps: update to final actors ([filecoin-project/lotus#11330](https://github.com/filecoin-project/lotus/pull/11440))
- github.com/filecoin-project/go-amt-ipld/v4 (v4.0.0 -> v4.2.0)
- github.com/filecoin-project/test-vectors/schema (v0.0.5 -> v0.0.7)
@ -572,104 +136,79 @@ Lotus-workers can now be built to leverage the SupraSeal C2 sealing optimization
- backport: tests: add SynthPorep layers to cachefiles ([filecoin-project/lotus#11344](https://github.com/filecoin-project/lotus/pull/11344))
- chore: backport #11408 to release/v1.25.0 ([filecoin-project/lotus#11414](https://github.com/filecoin-project/lotus/pull/11414))
- chore: backport calibnet lightweight patch ([filecoin-project/lotus#11422](https://github.com/filecoin-project/lotus/pull/11422))
- chore: update bootstrap nodes ([filecoin-project/lotus#11288](https://github.com/filecoin-project/lotus/pull/11288))
- chore: add bootstrap node on calibration ([filecoin-project/lotus#11175](https://github.com/filecoin-project/lotus/pull/11175))
# 1.24.0 / 2023-11-22 # v1.24.0-rc2 / 2023-10-17
This is the stable release for the upcoming **MANDATORY** Filecoin network upgrade v21, codenamed Watermelon 🍉, at **epoch 3469380 - 2023-12-12T13:30:00Z**. This is the second release candidate of the upcoming **MANDATORY Lotus v1.24.0** release, which will deliver the Filecoin network version 21, codenamed Watermelon 🍉.
**This release candidate does NOT set an upgrade epoch for mainnet, but sets the calibration network to upgrade at epoch 1013134, which is 2023-10-19T13:00:00Z.**
This second release candidate updates go-state-types to v0.12.5, adding a proofs mapping that was previously missing. Lotus v1.24.0-RC1 remains valid for the network upgrade on the Calibration network; however, storage providers intending to conduct Synthetic PoRep testing are advised to use RC2.
The Filecoin network version 21 delivers the following FIPs:
- [FIP0052: Increase Max Sector Commitment to 3.5 years](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0052.md)
- [FIP0059: Synthetic PoRep](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0059.md)
- [FIP0070: Allow SPs to move partitions between deadlines](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0070.md)
- [FIP0071: Deterministic State Access (IPLD Reachability)](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0071.md)
- [FIP0072: Improved event syscall API](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0072.md)
- [FIP0073: Remove beneficiary from the self_destruct syscall](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0073.md)
- [FIP0075: Improvements to FVM randomness syscalls](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0075.md)
A full list of the other protocol improvements delivered in this upgrade can be found [here](https://github.com/filecoin-project/core-devs/blob/master/Network%20Upgrades/v21.md).
## ☢️ Upgrade Warnings ☢️
This feature release requires a minimum Go version of v1.20.7 or higher to successfully build Lotus. Go version 1.21.x is not supported yet.
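A quick way to confirm the toolchain requirement before building; the `build` target shown is the one that appears in the Makefile changes later in this diff, so adjust it if your workflow differs.
```bash
go version   # should report go1.20.7 or newer; 1.21.x is not supported for this release
make build   # builds lotus, lotus-miner and lotus-worker per the repo Makefile
```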
## v12 Builtin Actor Bundles
[Builtin actor v12.0.0](https://github.com/filecoin-project/builtin-actors/releases/tag/v12.0.0) is used for supporting this upgrade. The actor bundles for the calibration network can be checked as follows:
Make sure that your Lotus actor bundle matches the v12 actors manifest by running the following CLI command after upgrading (a small verification sketch follows the output below):
```
./lotus state actor-cids --network-version 21 lotus state actor-cids --network-version=21
Network Version: 21
Actor Version: 12
Manifest CID: bafy2bzaceapkgfggvxyllnmuogtwasmsv5qi2qzhc2aybockd6kag2g5lzaio Manifest CID: bafy2bzacedrunxfqta5skb7q7x32lnp4efz2oq7fn226ffm7fu5iqs62jkmvs
Actor CID
datacap bafk2bzacebpiwb2ml4qbnnaayxumtk43ryhc63exdgnhivy3hwgmzemawsmpq paymentchannel bafk2bzacebaxhk4itfiuvbftg7kz5zxugqnvdgerobitjq4vl6q4orcwk6wqg
paymentchannel bafk2bzacectv4cm47bnhga5febf3lo3fq47g72kmmp2xd5s6tcxz7hiqdywa4
storagemarket bafk2bzacedylkg5am446lcuih4voyzdn4yjeqfsxfzh5b6mcuhx4mok5ph5c4
storagepower bafk2bzacecsij5tpfzjpfuckxvccv2p3bdqjklkrfyyoei6lx5dyj5j4fvjm6
cron bafk2bzacechxjkfe2cehx4s7skj3wzfpzf7zolds64khrrrs66bhazsemktls
eam bafk2bzaceb3elj4hfbbjp7g5bptc7su7mptszl4nlqfedilxvstjo5ungm6oe
ethaccount bafk2bzaceb4gkau2vgsijcxpfuq33bd7w3efr2rrhxrwiacjmns2ntdiamswq
reward bafk2bzacealqnxn5lwzwexd6reav4dppypquklx2ujlnvaxiqk2tzstyvkp5u
verifiedregistry bafk2bzacedudgflxc75c77c6zkmfyq4u2xuk7k6xw6dfdccarjrvxx453b77q
evm bafk2bzacecmnyfiwb52tkbwmm2dsd7ysi3nvuxl3lmspy7pl26wxj4zj7w4wi
placeholder bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro
storageminer bafk2bzacedo75pabe4i2l3hvhtsjmijrcytd2y76xwe573uku25fi7sugqld6 ethaccount bafk2bzaceajmc3y3sedsqymfla3dzzqzmbu5kmr2iskm26ga2u34ll5fpztfw
system bafk2bzacebfqrja2hip7esf4eafxjmu6xcogoqu5xxtgdg7xa5szgvvdguchu evm bafk2bzaced4sozr7m6rzcgpobzeiupghthfw6afumysu3oz6bxxirv74uo3vw
account bafk2bzaceboftg75mdiba7xbo2i3uvgtca4brhnr3u5ptihonixgpnrvhpxoa system bafk2bzacecioupndtcnyw6iq2hbrxag3aufvczlv5nobnfbkbywqzcyfaa376
init bafk2bzacebllyegx5r6lggf6ymyetbp7amacwpuxakhtjvjtvoy2bfkzk3vms init bafk2bzaceaewh7b6zl2egclm7fqzx2lsqr57i75lb6cj43ndoa4mal3k5ld3m
multisig bafk2bzacednkwcpw5yzxjceoaliajgupzj6iqxe7ks2ll3unspbprbo5f2now
eam bafk2bzacecb6cnwftvavpph4p34zs4psuy5xvbrhf7vszkva4npw6mw3c42xe
reward bafk2bzacedra77pcglf7vdca2itcaa4vd6xrxynxmgfgdjdxqxfwqyhtoxehy
storagemarket bafk2bzacea7g46y7xxu2zjq2h75x6mmx3utz2uxnlvnwi6tzpsvulna3bmiva
storageminer bafk2bzacecnh2ouohmonvebq7uughh4h3ppmg4cjsk74dzxlbbtlcij4xbzxq
storagepower bafk2bzacedd3ka44k7d46ckbinjhv3diyuu2epgbyvhqqyjkc64qlrg3wlgzi
verifiedregistry bafk2bzaceavldupmf7bimeeacs67z5xdfdlfca6p7sn6bev3mt5ggepfqvhqo
account bafk2bzacechwwxdqvggkdylm37zldjsra2ivkdzwp7fee56bzxbzs544wv6u6
cron bafk2bzacec4gdxxkqwxqqodsv6ug5dmdbqdfqwyqfek3yhxc2wweh5psxaeq6
datacap bafk2bzacecq5ppfskxgv3iea3jarsix6jdduuhwsn4fbvngtbmzelzmlygorm
```
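A minimal shell sketch of that check, comparing the locally reported manifest CID against the mainnet v12 manifest CID shown above; the variable names are illustrative only.
```bash
# sketch: verify the local actor bundle matches the expected v12 manifest
EXPECTED=bafy2bzaceapkgfggvxyllnmuogtwasmsv5qi2qzhc2aybockd6kag2g5lzaio
ACTUAL=$(./lotus state actor-cids --network-version 21 | awk '/Manifest CID/ {print $3}')
[ "$ACTUAL" = "$EXPECTED" ] && echo "actor bundle OK" || echo "MISMATCH: got $ACTUAL"
```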
## Migration
We are expecting a heavier-than-normal state migration for this upgrade due to the number of state changes introduced to miner sector info. (This is a similar migration to the Shark upgrade; however, we have introduced a couple of migration performance optimizations since then for a smoother upgrade experience.)
All node operators, including storage providers, should be aware that ONE pre-migration is being scheduled 180 epochs before the upgrade, around 2023-12-12T12:00:00Z. The pre-migration will take around 20-30 minutes and the final migration less than 30 seconds, depending on the amount of historical state in the node blockstore and the hardware specs the node is running on. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries (during our testing it topped out at about 205GiB of RAM, per htop, on a 1TiB box).
We recommend that node operators who do not care about historical chain states (and have not enabled splitstore `discard` mode) prune the chain blockstore by syncing from a snapshot 1-2 days before the upgrade, for example:
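A minimal sketch of that pruning approach, assuming you already have a recent snapshot file; the flags are the standard `lotus daemon` snapshot-import options, but double-check them against your installed version.
```bash
# sketch: re-sync from a snapshot to drop unwanted historical state
lotus daemon stop
# optionally clear the old chain data first (see the Lotus docs) so the old state is actually dropped
lotus daemon --import-snapshot /path/to/recent-snapshot.car --halt-after-import
lotus daemon   # restart; the node resumes syncing from the imported state
```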
Note to full archival node operators: the final migration may take some time to complete; during this period your node will fall out of sync and your chain service may experience some disruption. However, the node should catch up soon after the migration completes. You can test out the migration by running the following on your node in offline mode (a consolidated shell sketch follows the steps below):
1. `lotus chain head | head -n1`
2. Stop Lotus daemon
3. `./lotus-shed migrate-state --repo=[path-to-your-lotus-repo] 21 [output-of-step-1]`
You can check out the [tutorial for benchmarking the network migration here.](https://lotus.filecoin.io/kb/test-migration/)
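A minimal sketch of those three steps as a shell sequence; the repo path and the way you stop the daemon are placeholders for your own setup.
```bash
# 1. record the current head tipset/block CID
HEAD_CID=$(lotus chain head | head -n1)
# 2. stop the Lotus daemon (adjust to however you run it, e.g. systemctl stop lotus)
lotus daemon stop
# 3. run the nv21 migration against the recorded head, offline
./lotus-shed migrate-state --repo=/path/to/your/.lotus 21 "$HEAD_CID"
```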
## BREAKING CHANGE
There is a new protocol limit on how many partitions can be submitted in one PoSt message. If you have any customized tooling for batching PoSts, please update it accordingly.
- feat: limit PoSted partitions to 3 ([filecoin-project/lotus#11327](https://github.com/filecoin-project/lotus/pull/11327))
## New features
- Implement and support [FIP0052: Increase Max Sector Commitment to 3.5 years](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0052.md)
- fix: docs: Update SectorLifetime to be in line with FIP-0052 ([filecoin-project/lotus#11314](https://github.com/filecoin-project/lotus/pull/11314))
- Implement and support [FIP0059: Synthetic PoRep](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0059.md) - Check out the [Lotus documentation for Synthetic PoRep](https://lotus.filecoin.io/storage-providers/advanced-configurations/sealing/#synthetic-porep).
- feat: implement Synthetic PoRep ([filecoin-project/lotus#11258](https://github.com/filecoin-project/lotus/pull/11258))
- chore: config: Update todo in UseSyntheticPoRep ([filecoin-project/lotus#11297](https://github.com/filecoin-project/lotus/pull/11297))
- Implement and support [FIP0070: Allow SPs to move partitions between deadlines](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0070.md) - Check out the [Lotus documentation for Moving Partitions](https://lotus.filecoin.io/storage-providers/operate/daily-chores/#move-partitions)
- Feat: Lotus cli: actor: Support move partition command to move partitions' deadline ([filecoin-project/lotus#11301](https://github.com/filecoin-project/lotus/pull/11301))
- feat: limit PoSted partitions to 3 ([filecoin-project/lotus#11327](https://github.com/filecoin-project/lotus/pull/11327))
## Improvements
- Backport: feat: sealing: Switch to calling PreCommitSectorBatch2 ([filecoin-project/lotus#11215](https://github.com/filecoin-project/lotus/pull/11215))
- Updated the bootstrap nodes
## Dependencies
- github.com/filecoin-project/go-amt-ipld/v4 (v4.0.0 -> v4.2.0)
- chore: deps: update builtin-actors, FVM, and go-state-types ([filecoin-project/lotus#11291](https://github.com/filecoin-project/lotus/pull/11291))
- chore: deps: update FFI, FVM, and actors ([filecoin-project/lotus#11310](https://github.com/filecoin-project/lotus/pull/11310))
- chore: deps: update to final actors ([filecoin-project/lotus#11330](https://github.com/filecoin-project/lotus/pull/11440)) - chore: deps: update to latest actors and FFI ([filecoin-project/lotus#11330](https://github.com/filecoin-project/lotus/pull/11330))
- chore: deps: update to go-state-types v0.12.8 ([filecoin-project/lotus#11339](https://github.com/filecoin-project/lotus/pull/11437)) - chore: deps: update to go-state-types v0.12.5 ([filecoin-project/lotus#11339](https://github.com/filecoin-project/lotus/pull/11339))
- chore: deps: update libp2p to v0.30.0 #11434
## Snapshots
The [Forest team](https://filecoinproject.slack.com/archives/C029LPZ5N73) at ChainSafe has launched a brand-new lightweight snapshot service backed by Forest nodes! It is a great alternative to the existing fil-infra service and is compatible with Lotus. We recommend Lotus users check it out [here](https://docs.filecoin.io/networks/mainnet#resources)!
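A minimal sketch of fetching the latest mainnet snapshot from the Forest archive endpoint that also appears in the Dockerfile change later in this diff, then importing it; the exact file name and compression handling are assumptions to verify against the docs linked above.
```bash
# download the latest Forest-generated mainnet snapshot (the endpoint redirects to the actual file)
curl -L -o snapshot.forest.car.zst "https://forest-archive.chainsafe.dev/latest/mainnet/"
zstd -d snapshot.forest.car.zst    # decompress to snapshot.forest.car
lotus daemon --import-snapshot snapshot.forest.car --halt-after-import
```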
## Others
- chore: nv-skeleton for feat/nv21-branch ([filecoin-project/lotus#11176](https://github.com/filecoin-project/lotus/pull/11176))
- chore: chain/actors: Use type proxies instead of versioned GST imports ([filecoin-project/lotus#11216](https://github.com/filecoin-project/lotus/pull/11216))
- chore: butterfly: Add preliminary nv21 assets ([filecoin-project/lotus#11293](https://github.com/filecoin-project/lotus/pull/11293))
- chore: butterfly: Update Butterfly Assets ([filecoin-project/lotus#11312](https://github.com/filecoin-project/lotus/pull/11312))
- chore: release: Set calibration upgrade height ([filecoin-project/lotus#11331](https://github.com/filecoin-project/lotus/pull/11331))
- chore: build: bump version to 1.24.0-rc1 ([filecoin-project/lotus#11332](https://github.com/filecoin-project/lotus/pull/11332))
# v1.23.3 / 2023-08-01
@ -697,7 +236,7 @@ This feature release requires a **minimum Go version of v1.19.12 or higher to su
- feat: sealing: flag to run data_cid untied from addpiece ([filecoin-project/lotus#10797](https://github.com/filecoin-project/lotus/pull/10797))
- feat: Lotus Gateway: add MpoolPending, ChainGetBlock and MinerGetBaseInfo ([filecoin-project/lotus#10929](https://github.com/filecoin-project/lotus/pull/10929))
## Improvements && Bug Fixes ## Improvements
- chore: update ffi & fvm ([filecoin-project/lotus#11040](https://github.com/filecoin-project/lotus/pull/11040))
- feat: Make sure we don't store duplicate actor events caused by reorgs in events.db ([filecoin-project/lotus#11015](https://github.com/filecoin-project/lotus/pull/11015))
- sealing: Use only non-assigned deals when selecting snap sectors ([filecoin-project/lotus#11002](https://github.com/filecoin-project/lotus/pull/11002))
@ -775,10 +314,6 @@ This feature release requires a **minimum Go version of v1.19.12 or higher to su
- fix: cli: Change arg wording in change-beneficiary cmd ([filecoin-project/lotus#10823](https://github.com/filecoin-project/lotus/pull/10823))
- refactor: streamline error handling in CheckPendingMessages (#10818) ([filecoin-project/lotus#10818](https://github.com/filecoin-project/lotus/pull/10818))
- feat: Add tmp indices to events table while performing migration to V2
- fix: sync: iterate over returned messages directly #11373
- fix: api: compute the effective gas cost with the correct base-fee #11357
- fix: check invariants: v12 check #11371
- fix: api: compute gasUsedRatio based on max gas in the tipset #11354
# v1.23.2 / 2023-06-28
@ -1,5 +1,5 @@
##################################### #####################################
FROM golang:1.21.7-bullseye AS lotus-builder FROM golang:1.20.7-bullseye AS lotus-builder
MAINTAINER Lotus Development Team MAINTAINER Lotus Development Team
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
@ -33,19 +33,18 @@ RUN set -eux; \
COPY ./ /opt/filecoin COPY ./ /opt/filecoin
WORKDIR /opt/filecoin WORKDIR /opt/filecoin
RUN scripts/docker-git-state-check.sh
### make configurable filecoin-ffi build
ARG FFI_BUILD_FROM_SOURCE=0 ARG FFI_BUILD_FROM_SOURCE=0
ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE} ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE}
#RUN make clean deps RUN make clean deps
ARG RUSTFLAGS="" ARG RUSTFLAGS=""
ARG GOFLAGS="" ARG GOFLAGS=""
RUN make clean deps && \ RUN make buildall
make lotus lotus-shed lotus-stats && \
install -C ./lotus /usr/local/bin/lotus && \
install -C ./lotus-shed /usr/local/bin/lotus-shed && \
install -C ./lotus-stats /usr/local/bin/lotus-stats
##################################### #####################################
FROM ubuntu:20.04 AS lotus-base FROM ubuntu:20.04 AS lotus-base
@ -70,11 +69,12 @@ RUN useradd -r -u 532 -U fc \
FROM lotus-base AS lotus FROM lotus-base AS lotus
MAINTAINER Lotus Development Team MAINTAINER Lotus Development Team
COPY --from=lotus-builder \ COPY --from=lotus-builder /opt/filecoin/lotus /usr/local/bin/
/usr/local/bin/lotus \ COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/
/usr/local/bin/lotus-shed \ COPY scripts/docker-lotus-entrypoint.sh /
/usr/local/bin/
ARG DOCKER_LOTUS_IMPORT_SNAPSHOT https://snapshots.mainnet.filops.net/minimal/latest
ENV DOCKER_LOTUS_IMPORT_SNAPSHOT ${DOCKER_LOTUS_IMPORT_SNAPSHOT}
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_PATH /var/lib/lotus ENV LOTUS_PATH /var/lib/lotus
ENV DOCKER_LOTUS_IMPORT_WALLET "" ENV DOCKER_LOTUS_IMPORT_WALLET ""
@ -89,42 +89,48 @@ USER fc
EXPOSE 1234 EXPOSE 1234
ENTRYPOINT ["/docker-lotus-entrypoint.sh"]
CMD ["-help"] CMD ["-help"]
##################################### #####################################
FROM lotus-base AS lotus-all-in-one FROM lotus-base AS lotus-all-in-one
# Install netcat for healthcheck
RUN apt-get update && apt-get install -y netcat && apt-get install -y iproute2
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
ENV LOTUS_PATH /var/lib/lotus ENV LOTUS_PATH /var/lib/lotus
ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
ARG DOCKER_LOTUS_IMPORT_SNAPSHOT=https://forest-archive.chainsafe.dev/latest/mainnet/ ENV WALLET_PATH /var/lib/lotus-wallet
ENV DOCKER_LOTUS_IMPORT_SNAPSHOT=${DOCKER_LOTUS_IMPORT_SNAPSHOT}
COPY --from=lotus-builder /opt/filecoin/lotus /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-seed /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-wallet /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-gateway /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-miner /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-worker /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-stats /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-stats /usr/local/bin/
COPY scripts/docker-lotus-entrypoint.sh /docker-lotus-entrypoint.sh COPY --from=lotus-builder /opt/filecoin/lotus-fountain /usr/local/bin/
RUN chmod +x /docker-lotus-entrypoint.sh
RUN mkdir /var/tmp/filecoin-proof-parameters RUN mkdir /var/tmp/filecoin-proof-parameters
RUN mkdir /var/lib/lotus RUN mkdir /var/lib/lotus
RUN mkdir /var/lib/lotus-miner
RUN mkdir /var/lib/lotus-worker
RUN mkdir /var/lib/lotus-wallet
RUN chown fc: /var/tmp/filecoin-proof-parameters RUN chown fc: /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus RUN chown fc: /var/lib/lotus
RUN chown fc: /var/lib/lotus-miner
RUN chown fc: /var/lib/lotus-worker
RUN chown fc: /var/lib/lotus-wallet
VOLUME /var/tmp/filecoin-proof-parameters VOLUME /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/lotus VOLUME /var/lib/lotus
#VOLUME /var/lib/lotus-miner VOLUME /var/lib/lotus-miner
#VOLUME /var/lib/lotus-worker VOLUME /var/lib/lotus-worker
#VOLUME /var/lib/lotus-wallet VOLUME /var/lib/lotus-wallet
EXPOSE 1234 EXPOSE 1234
EXPOSE 1235 EXPOSE 2345
EXPOSE 3456
EXPOSE 1777
@ -1 +1 @@
1.21.7 1.20.7
@ -73,7 +73,7 @@ All releases under an odd minor version number indicate **feature releases**. Th
Feature releases include new development and bug fixes. They are not mandatory, but still highly recommended, **as they may contain critical security fixes**. Note that some of these releases may be very small patch releases that include critical hotfixes. There is no way to distinguish between a bug fix release and a feature release on the "feature" version. Both cases will use the "patch" version number. Feature releases include new development and bug fixes. They are not mandatory, but still highly recommended, **as they may contain critical security fixes**. Note that some of these releases may be very small patch releases that include critical hotfixes. There is no way to distinguish between a bug fix release and a feature release on the "feature" version. Both cases will use the "patch" version number.
We aim to ship a new feature release of the Lotus software from our development (master) branch every 3 weeks, so users can expect a regular cadence of Lotus feature releases. Note that mandatory releases for network upgrades may disrupt this schedule. For more, see the [Release Cycle section](#release-cycle). We aim to ship a new feature release of the Lotus software from our development (master) branch every 3 weeks, so users can expect a regular cadence of Lotus feature releases. Note that mandatory releases for network upgrades may disrupt this schedule. For more, see the Release Cycle section (TODO: Link).
### Examples Scenarios ### Examples Scenarios
@ -66,7 +66,7 @@ CLEAN+=build/.update-modules
deps: $(BUILD_DEPS) deps: $(BUILD_DEPS)
.PHONY: deps .PHONY: deps
build-devnets: build lotus-seed lotus-shed lotus-provider build-devnets: build lotus-seed lotus-shed
.PHONY: build-devnets .PHONY: build-devnets
debug: GOFLAGS+=-tags=debug debug: GOFLAGS+=-tags=debug
@ -97,15 +97,6 @@ lotus-miner: $(BUILD_DEPS)
.PHONY: lotus-miner .PHONY: lotus-miner
BINS+=lotus-miner BINS+=lotus-miner
lotus-provider: $(BUILD_DEPS)
rm -f lotus-provider
$(GOCC) build $(GOFLAGS) -o lotus-provider ./cmd/lotus-provider
.PHONY: lotus-provider
BINS+=lotus-provider
lp2k: GOFLAGS+=-tags=2k
lp2k: lotus-provider
lotus-worker: $(BUILD_DEPS) lotus-worker: $(BUILD_DEPS)
rm -f lotus-worker rm -f lotus-worker
$(GOCC) build $(GOFLAGS) -o lotus-worker ./cmd/lotus-worker $(GOCC) build $(GOFLAGS) -o lotus-worker ./cmd/lotus-worker
@ -124,13 +115,13 @@ lotus-gateway: $(BUILD_DEPS)
.PHONY: lotus-gateway .PHONY: lotus-gateway
BINS+=lotus-gateway BINS+=lotus-gateway
build: lotus lotus-miner lotus-worker lotus-provider build: lotus lotus-miner lotus-worker
@[[ $$(type -P "lotus") ]] && echo "Caution: you have \ @[[ $$(type -P "lotus") ]] && echo "Caution: you have \
an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true
.PHONY: build .PHONY: build
install: install-daemon install-miner install-worker install-provider install: install-daemon install-miner install-worker
install-daemon: install-daemon:
install -C ./lotus /usr/local/bin/lotus install -C ./lotus /usr/local/bin/lotus
@ -138,9 +129,6 @@ install-daemon:
install-miner: install-miner:
install -C ./lotus-miner /usr/local/bin/lotus-miner install -C ./lotus-miner /usr/local/bin/lotus-miner
install-provider:
install -C ./lotus-provider /usr/local/bin/lotus-provider
install-worker: install-worker:
install -C ./lotus-worker /usr/local/bin/lotus-worker install -C ./lotus-worker /usr/local/bin/lotus-worker
@ -156,9 +144,6 @@ uninstall-daemon:
uninstall-miner: uninstall-miner:
rm -f /usr/local/bin/lotus-miner rm -f /usr/local/bin/lotus-miner
uninstall-provider:
rm -f /usr/local/bin/lotus-provider
uninstall-worker: uninstall-worker:
rm -f /usr/local/bin/lotus-worker rm -f /usr/local/bin/lotus-worker
@ -256,14 +241,6 @@ install-miner-service: install-miner install-daemon-service
@echo @echo
@echo "lotus-miner service installed. Don't forget to run 'sudo systemctl start lotus-miner' to start it and 'sudo systemctl enable lotus-miner' for it to be enabled on startup." @echo "lotus-miner service installed. Don't forget to run 'sudo systemctl start lotus-miner' to start it and 'sudo systemctl enable lotus-miner' for it to be enabled on startup."
install-provider-service: install-provider install-daemon-service
mkdir -p /etc/systemd/system
mkdir -p /var/log/lotus
install -C -m 0644 ./scripts/lotus-provider.service /etc/systemd/system/lotus-provider.service
systemctl daemon-reload
@echo
@echo "lotus-provider service installed. Don't forget to run 'sudo systemctl start lotus-provider' to start it and 'sudo systemctl enable lotus-provider' for it to be enabled on startup."
install-main-services: install-miner-service install-main-services: install-miner-service
install-all-services: install-main-services install-all-services: install-main-services
@ -282,12 +259,6 @@ clean-miner-service:
rm -f /etc/systemd/system/lotus-miner.service rm -f /etc/systemd/system/lotus-miner.service
systemctl daemon-reload systemctl daemon-reload
clean-provider-service:
-systemctl stop lotus-provider
-systemctl disable lotus-provider
rm -f /etc/systemd/system/lotus-provider.service
systemctl daemon-reload
clean-main-services: clean-daemon-service clean-main-services: clean-daemon-service
clean-all-services: clean-main-services clean-all-services: clean-main-services
@ -323,8 +294,7 @@ actors-code-gen:
$(GOCC) run ./chain/actors/agen $(GOCC) run ./chain/actors/agen
$(GOCC) fmt ./... $(GOCC) fmt ./...
actors-gen: actors-code-gen actors-gen: actors-code-gen fiximports
./scripts/fiximports
.PHONY: actors-gen .PHONY: actors-gen
bundle-gen: bundle-gen:
@ -358,7 +328,7 @@ docsgen-md-bin: api-gen actors-gen
docsgen-openrpc-bin: api-gen actors-gen docsgen-openrpc-bin: api-gen actors-gen
$(GOCC) build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd $(GOCC) build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd
docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker docsgen-md-provider docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker
docsgen-md-full: docsgen-md-bin docsgen-md-full: docsgen-md-bin
./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md ./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md
@ -367,8 +337,6 @@ docsgen-md-storage: docsgen-md-bin
./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md ./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md
docsgen-md-worker: docsgen-md-bin docsgen-md-worker: docsgen-md-bin
./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md ./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md
docsgen-md-provider: docsgen-md-bin
./docgen-md "api/api_lp.go" "Provider" "api" "./api" > documentation/en/api-v0-methods-provider.md
docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker docsgen-openrpc-gateway docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker docsgen-openrpc-gateway
@ -386,23 +354,21 @@ docsgen-openrpc-gateway: docsgen-openrpc-bin
fiximports: fiximports:
./scripts/fiximports ./scripts/fiximports
gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci fiximports
./scripts/fiximports
@echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO RUN 'make docsgen-cli'" @echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO RUN 'make docsgen-cli'"
.PHONY: gen .PHONY: gen
jen: gen jen: gen
snap: lotus lotus-miner lotus-worker lotus-provider snap: lotus lotus-miner lotus-worker
snapcraft snapcraft
# snapcraft upload ./lotus_*.snap # snapcraft upload ./lotus_*.snap
# separate from gen because it needs binaries # separate from gen because it needs binaries
docsgen-cli: lotus lotus-miner lotus-worker lotus-provider docsgen-cli: lotus lotus-miner lotus-worker
python3 ./scripts/generate-lotus-cli.py python3 ./scripts/generate-lotus-cli.py
./lotus config default > documentation/en/default-lotus-config.toml ./lotus config default > documentation/en/default-lotus-config.toml
./lotus-miner config default > documentation/en/default-lotus-miner-config.toml ./lotus-miner config default > documentation/en/default-lotus-miner-config.toml
./lotus-provider config default > documentation/en/default-lotus-provider-config.toml
.PHONY: docsgen-cli .PHONY: docsgen-cli
print-%: print-%:
@ -71,10 +71,10 @@ For other distributions you can find the required dependencies [here.](https://l
#### Go #### Go
To build Lotus, you need a working installation of [Go 1.21.7 or higher](https://golang.org/dl/): To build Lotus, you need a working installation of [Go 1.19.12 or higher](https://golang.org/dl/):
```bash ```bash
wget -c https://golang.org/dl/go1.21.7.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local wget -c https://golang.org/dl/go1.19.12.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
``` ```
**TIP:** **TIP:**
@ -133,8 +133,6 @@ Note: The default branch `master` is the dev branch where the latest new feature
6. You should now have Lotus installed. You can now [start the Lotus daemon and sync the chain](https://lotus.filecoin.io/lotus/install/linux/#start-the-lotus-daemon-and-sync-the-chain). 6. You should now have Lotus installed. You can now [start the Lotus daemon and sync the chain](https://lotus.filecoin.io/lotus/install/linux/#start-the-lotus-daemon-and-sync-the-chain).
7. (Optional) Follow the [Setting Up Prometheus and Grafana](https://github.com/filecoin-project/lotus/tree/master/metrics/README.md) guide for detailed instructions on setting up a working monitoring system running against a local running lotus node.
## License ## License
Dual-licensed under [MIT](https://github.com/filecoin-project/lotus/blob/master/LICENSE-MIT) + [Apache 2.0](https://github.com/filecoin-project/lotus/blob/master/LICENSE-APACHE) Dual-licensed under [MIT](https://github.com/filecoin-project/lotus/blob/master/LICENSE-MIT) + [Apache 2.0](https://github.com/filecoin-project/lotus/blob/master/LICENSE-APACHE)
@ -20,6 +20,7 @@ import (
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/builtin/v8/paych" "github.com/filecoin-project/go-state-types/builtin/v8/paych"
"github.com/filecoin-project/go-state-types/builtin/v9/market"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/dline"
@ -27,10 +28,8 @@ import (
apitypes "github.com/filecoin-project/lotus/api/types" apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/chain/types/ethtypes"
"github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/dtypes"
@ -553,20 +552,14 @@ type FullNode interface {
// StateGetAllocationForPendingDeal returns the allocation for a given deal ID of a pending deal. Returns nil if // StateGetAllocationForPendingDeal returns the allocation for a given deal ID of a pending deal. Returns nil if
// pending allocation is not found. // pending allocation is not found.
StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read
// StateGetAllocationIdForPendingDeal is like StateGetAllocationForPendingDeal except it returns the allocation ID
StateGetAllocationIdForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (verifreg.AllocationId, error) //perm:read
// StateGetAllocation returns the allocation for a given address and allocation ID. // StateGetAllocation returns the allocation for a given address and allocation ID.
StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read
// StateGetAllocations returns the all the allocations for a given client. // StateGetAllocations returns the all the allocations for a given client.
StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read
// StateGetAllAllocations returns the all the allocations available in verified registry actor.
StateGetAllAllocations(ctx context.Context, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read
// StateGetClaim returns the claim for a given address and claim ID. // StateGetClaim returns the claim for a given address and claim ID.
StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) //perm:read StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) //perm:read
// StateGetClaims returns the all the claims for a given provider. // StateGetClaims returns the all the claims for a given provider.
StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read
// StateGetAllClaims returns the all the claims available in verified registry actor.
StateGetAllClaims(ctx context.Context, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read
// StateComputeDataCID computes DataCID from a set of on-chain deals // StateComputeDataCID computes DataCID from a set of on-chain deals
StateComputeDataCID(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) //perm:read StateComputeDataCID(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) //perm:read
// StateLookupID retrieves the ID address of the given address // StateLookupID retrieves the ID address of the given address
@ -831,7 +824,7 @@ type FullNode interface {
EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) //perm:read EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) //perm:read
EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read
EthEstimateGas(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthUint64, error) //perm:read EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error) //perm:read
EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) //perm:read EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) //perm:read
EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) //perm:read EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) //perm:read
@ -875,26 +868,9 @@ type FullNode interface {
Web3ClientVersion(ctx context.Context) (string, error) //perm:read Web3ClientVersion(ctx context.Context) (string, error) //perm:read
// TraceAPI related methods // TraceAPI related methods
// Returns an OpenEthereum-compatible trace of the given block (implementing `trace_block`),
// translating Filecoin semantics into Ethereum semantics and tracing both EVM and FVM calls.
// //
// Features: // Returns traces created at given block
//
// - FVM actor create events, calls, etc. show up as if they were EVM smart contract events.
// - Native FVM call inputs are ABI-encoded (Solidity ABI) as if they were calls to a
// `handle_filecoin_method(uint64 method, uint64 codec, bytes params)` function
// (where `codec` is the IPLD codec of `params`).
// - Native FVM call outputs (return values) are ABI-encoded as `(uint32 exit_code, uint64
// codec, bytes output)` where `codec` is the IPLD codec of `output`.
//
// Limitations (for now):
//
// 1. Block rewards are not included in the trace.
// 2. SELFDESTRUCT operations are not included in the trace.
// 3. EVM smart contract "create" events always specify `0xfe` as the "code" for newly created EVM smart contracts.
EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) //perm:read EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) //perm:read
// Replays all transactions in a block returning the requested traces for each transaction // Replays all transactions in a block returning the requested traces for each transaction
EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) //perm:read EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) //perm:read
@ -906,33 +882,6 @@ type FullNode interface {
RaftState(ctx context.Context) (*RaftStateData, error) //perm:read RaftState(ctx context.Context) (*RaftStateData, error) //perm:read
RaftLeader(ctx context.Context) (peer.ID, error) //perm:read RaftLeader(ctx context.Context) (peer.ID, error) //perm:read
// Actor events
// GetActorEventsRaw returns all user-programmed and built-in actor events that match the given
// filter.
// This is a request/response API.
// Results available from this API may be limited by the MaxFilterResults and MaxFilterHeightRange
// configuration options and also the amount of historical data available in the node.
//
// This is an EXPERIMENTAL API and may be subject to change.
GetActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error) //perm:read
// SubscribeActorEventsRaw returns a long-lived stream of all user-programmed and built-in actor
// events that match the given filter.
// Events that match the given filter are written to the stream in real-time as they are emitted
// from the FVM.
// The response stream is closed when the client disconnects, when a ToHeight is specified and is
// reached, or if there is an error while writing an event to the stream.
// This API also allows clients to read all historical events matching the given filter before any
// real-time events are written to the response stream if the filter specifies an earlier
// FromHeight.
// Results available from this API may be limited by the MaxFilterResults and MaxFilterHeightRange
// configuration options and also the amount of historical data available in the node.
//
// Note: this API is only available via websocket connections.
// This is an EXPERIMENTAL API and may be subject to change.
SubscribeActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) //perm:read
} }
// reverse interface to the client, called after EthSubscribe // reverse interface to the client, called after EthSubscribe
@ -1168,47 +1117,9 @@ type MarketBalance struct {
Locked big.Int Locked big.Int
} }
type MarketDealState struct {
SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector
LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated
SlashEpoch abi.ChainEpoch // -1 if deal never slashed
}
func MakeDealState(mds market.DealState) MarketDealState {
return MarketDealState{
SectorStartEpoch: mds.SectorStartEpoch(),
LastUpdatedEpoch: mds.LastUpdatedEpoch(),
SlashEpoch: mds.SlashEpoch(),
}
}
type mstate struct {
s MarketDealState
}
func (m mstate) SectorStartEpoch() abi.ChainEpoch {
return m.s.SectorStartEpoch
}
func (m mstate) LastUpdatedEpoch() abi.ChainEpoch {
return m.s.LastUpdatedEpoch
}
func (m mstate) SlashEpoch() abi.ChainEpoch {
return m.s.SlashEpoch
}
func (m mstate) Equals(o market.DealState) bool {
return market.DealStatesEqual(m, o)
}
func (m MarketDealState) Iface() market.DealState {
return mstate{m}
}
type MarketDeal struct { type MarketDeal struct {
Proposal market.DealProposal Proposal market.DealProposal
State MarketDealState State market.DealState
} }
type RetrievalOrder struct { type RetrievalOrder struct {
@ -114,7 +114,7 @@ type Gateway interface {
EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error) EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error)
EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error)
EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error) EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error)
EthEstimateGas(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthUint64, error) EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error)
EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error)
EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error)
EthGetLogs(ctx context.Context, filter *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) EthGetLogs(ctx context.Context, filter *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error)
@ -129,8 +129,4 @@ type Gateway interface {
Web3ClientVersion(ctx context.Context) (string, error) Web3ClientVersion(ctx context.Context) (string, error)
EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error)
EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error)
GetActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error)
SubscribeActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error)
ChainGetEvents(context.Context, cid.Cid) ([]types.Event, error)
} }
@ -1,10 +0,0 @@
package api
import "context"
type LotusProvider interface {
Version(context.Context) (Version, error) //perm:admin
// Trigger shutdown
Shutdown(context.Context) error //perm:admin
}
@ -24,7 +24,6 @@ import (
builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/storage/pipeline/piece"
"github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/pipeline/sealiface"
"github.com/filecoin-project/lotus/storage/sealer/fsutil" "github.com/filecoin-project/lotus/storage/sealer/fsutil"
"github.com/filecoin-project/lotus/storage/sealer/storiface" "github.com/filecoin-project/lotus/storage/sealer/storiface"
@ -76,7 +75,7 @@ type StorageMiner interface {
// Add piece to an open sector. If no sectors with enough space are open, // Add piece to an open sector. If no sectors with enough space are open,
// either a new sector will be created, or this call will block until more // either a new sector will be created, or this call will block until more
// sectors can be created. // sectors can be created.
SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d piece.PieceDealInfo) (SectorOffset, error) //perm:admin SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d PieceDealInfo) (SectorOffset, error) //perm:admin
SectorsUnsealPiece(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error //perm:admin SectorsUnsealPiece(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error //perm:admin
@ -354,21 +353,10 @@ type SectorLog struct {
} }
type SectorPiece struct { type SectorPiece struct {
Piece abi.PieceInfo Piece abi.PieceInfo
DealInfo *PieceDealInfo // nil for pieces which do not appear in deals (e.g. filler pieces)
// DealInfo is nil for pieces which do not appear in deals (e.g. filler pieces)
// NOTE: DDO pieces which aren't associated with a market deal and have no
// verified allocation will still have a non-nil DealInfo.
// nil DealInfo indicates that the piece is a filler, and has zero piece commitment.
DealInfo *piece.PieceDealInfo
} }
// DEPRECATED: Use piece.PieceDealInfo instead
type PieceDealInfo = piece.PieceDealInfo
// DEPRECATED: Use piece.DealSchedule instead
type DealSchedule = piece.DealSchedule
type SectorInfo struct { type SectorInfo struct {
SectorID abi.SectorNumber SectorID abi.SectorNumber
State SectorState State SectorState
@ -471,6 +459,28 @@ type SectorOffset struct {
Offset abi.PaddedPieceSize Offset abi.PaddedPieceSize
} }
// DealInfo is a tuple of deal identity and its schedule
type PieceDealInfo struct {
// "Old" builtin-market deal info
PublishCid *cid.Cid
DealID abi.DealID
DealProposal *market.DealProposal
// Common deal info
DealSchedule DealSchedule
// Best-effort deal asks
KeepUnsealed bool
}
// DealSchedule communicates the time interval of a storage deal. The deal must
// appear in a sealed (proven) sector no later than StartEpoch, otherwise it
// is invalid.
type DealSchedule struct {
StartEpoch abi.ChainEpoch
EndEpoch abi.ChainEpoch
}
// DagstoreShardInfo is the serialized form of dagstore.DagstoreShardInfo that // DagstoreShardInfo is the serialized form of dagstore.DagstoreShardInfo that
// we expose through JSON-RPC to avoid clients having to depend on the // we expose through JSON-RPC to avoid clients having to depend on the
// dagstore lib. // dagstore lib.
@ -14,8 +14,7 @@ import (
abi "github.com/filecoin-project/go-state-types/abi" abi "github.com/filecoin-project/go-state-types/abi"
paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" paych "github.com/filecoin-project/go-state-types/builtin/v8/paych"
market "github.com/filecoin-project/go-state-types/builtin/v9/market"
piece "github.com/filecoin-project/lotus/storage/pipeline/piece"
) )
var _ = xerrors.Errorf var _ = xerrors.Errorf
@ -36,7 +35,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
} }
// t.Channel (address.Address) (struct) // t.Channel (address.Address) (struct)
if len("Channel") > 8192 { if len("Channel") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Channel\" was too long") return xerrors.Errorf("Value in field \"Channel\" was too long")
} }
@ -52,7 +51,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
} }
// t.Vouchers ([]*paych.SignedVoucher) (slice) // t.Vouchers ([]*paych.SignedVoucher) (slice)
if len("Vouchers") > 8192 { if len("Vouchers") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Vouchers\" was too long") return xerrors.Errorf("Value in field \"Vouchers\" was too long")
} }
@ -63,7 +62,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
return err return err
} }
if len(t.Vouchers) > 8192 { if len(t.Vouchers) > cbg.MaxLength {
return xerrors.Errorf("Slice value in field t.Vouchers was too long") return xerrors.Errorf("Slice value in field t.Vouchers was too long")
} }
@ -74,11 +73,10 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
if err := v.MarshalCBOR(cw); err != nil { if err := v.MarshalCBOR(cw); err != nil {
return err return err
} }
} }
// t.WaitSentinel (cid.Cid) (struct) // t.WaitSentinel (cid.Cid) (struct)
if len("WaitSentinel") > 8192 { if len("WaitSentinel") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"WaitSentinel\" was too long") return xerrors.Errorf("Value in field \"WaitSentinel\" was too long")
} }
@ -125,7 +123,7 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) {
for i := uint64(0); i < n; i++ { for i := uint64(0); i < n; i++ {
{ {
sval, err := cbg.ReadStringWithMax(cr, 8192) sval, err := cbg.ReadString(cr)
if err != nil { if err != nil {
return err return err
} }
@ -152,7 +150,7 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) {
return err return err
} }
if extra > 8192 { if extra > cbg.MaxLength {
return fmt.Errorf("t.Vouchers: array too large (%d)", extra) return fmt.Errorf("t.Vouchers: array too large (%d)", extra)
} }
@ -190,9 +188,9 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) {
} }
} }
} }
} }
// t.WaitSentinel (cid.Cid) (struct) // t.WaitSentinel (cid.Cid) (struct)
case "WaitSentinel": case "WaitSentinel":
@ -228,7 +226,7 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
} }
// t.Size (abi.UnpaddedPieceSize) (uint64) // t.Size (abi.UnpaddedPieceSize) (uint64)
if len("Size") > 8192 { if len("Size") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Size\" was too long") return xerrors.Errorf("Value in field \"Size\" was too long")
} }
@ -244,7 +242,7 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
} }
// t.Offset (abi.PaddedPieceSize) (uint64) // t.Offset (abi.PaddedPieceSize) (uint64)
if len("Offset") > 8192 { if len("Offset") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Offset\" was too long") return xerrors.Errorf("Value in field \"Offset\" was too long")
} }
@ -260,7 +258,7 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
} }
// t.SectorID (abi.SectorNumber) (uint64) // t.SectorID (abi.SectorNumber) (uint64)
if len("SectorID") > 8192 { if len("SectorID") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"SectorID\" was too long") return xerrors.Errorf("Value in field \"SectorID\" was too long")
} }
@ -307,7 +305,7 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) {
for i := uint64(0); i < n; i++ { for i := uint64(0); i < n; i++ {
{ {
sval, err := cbg.ReadStringWithMax(cr, 8192) sval, err := cbg.ReadString(cr)
if err != nil { if err != nil {
return err return err
} }
@ -383,7 +381,7 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error {
} }
// t.Refs ([]api.SealedRef) (slice) // t.Refs ([]api.SealedRef) (slice)
if len("Refs") > 8192 { if len("Refs") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Refs\" was too long") return xerrors.Errorf("Value in field \"Refs\" was too long")
} }
@ -394,7 +392,7 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error {
return err return err
} }
if len(t.Refs) > 8192 { if len(t.Refs) > cbg.MaxLength {
return xerrors.Errorf("Slice value in field t.Refs was too long") return xerrors.Errorf("Slice value in field t.Refs was too long")
} }
@ -405,7 +403,6 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error {
if err := v.MarshalCBOR(cw); err != nil { if err := v.MarshalCBOR(cw); err != nil {
return err return err
} }
} }
return nil return nil
} }
@ -439,7 +436,7 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) (err error) {
for i := uint64(0); i < n; i++ { for i := uint64(0); i < n; i++ {
{ {
sval, err := cbg.ReadStringWithMax(cr, 8192) sval, err := cbg.ReadString(cr)
if err != nil { if err != nil {
return err return err
} }
@ -456,7 +453,7 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) (err error) {
return err return err
} }
if extra > 8192 { if extra > cbg.MaxLength {
return fmt.Errorf("t.Refs: array too large (%d)", extra) return fmt.Errorf("t.Refs: array too large (%d)", extra)
} }
@ -484,7 +481,6 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) (err error) {
} }
} }
} }
} }
@ -509,7 +505,7 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
} }
// t.Epoch (abi.ChainEpoch) (int64) // t.Epoch (abi.ChainEpoch) (int64)
if len("Epoch") > 8192 { if len("Epoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Epoch\" was too long") return xerrors.Errorf("Value in field \"Epoch\" was too long")
} }
@ -531,7 +527,7 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
} }
// t.Value (abi.SealRandomness) (slice) // t.Value (abi.SealRandomness) (slice)
if len("Value") > 8192 { if len("Value") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Value\" was too long") return xerrors.Errorf("Value in field \"Value\" was too long")
} }
@ -542,7 +538,7 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
return err return err
} }
if len(t.Value) > 2097152 { if len(t.Value) > cbg.ByteArrayMaxLen {
return xerrors.Errorf("Byte array in field t.Value was too long") return xerrors.Errorf("Byte array in field t.Value was too long")
} }
@ -550,10 +546,9 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
return err return err
} }
if _, err := cw.Write(t.Value); err != nil { if _, err := cw.Write(t.Value[:]); err != nil {
return err return err
} }
return nil return nil
} }
@ -586,7 +581,7 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) {
for i := uint64(0); i < n; i++ { for i := uint64(0); i < n; i++ {
{ {
sval, err := cbg.ReadStringWithMax(cr, 8192) sval, err := cbg.ReadString(cr)
if err != nil { if err != nil {
return err return err
} }
@ -599,10 +594,10 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) {
case "Epoch": case "Epoch":
{ {
maj, extra, err := cr.ReadHeader() maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil { if err != nil {
return err return err
} }
var extraI int64
switch maj { switch maj {
case cbg.MajUnsignedInt: case cbg.MajUnsignedInt:
extraI = int64(extra) extraI = int64(extra)
@ -629,7 +624,7 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) {
return err return err
} }
if extra > 2097152 { if extra > cbg.ByteArrayMaxLen {
return fmt.Errorf("t.Value: byte array too large (%d)", extra) return fmt.Errorf("t.Value: byte array too large (%d)", extra)
} }
if maj != cbg.MajByteString { if maj != cbg.MajByteString {
@ -640,7 +635,7 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) {
t.Value = make([]uint8, extra) t.Value = make([]uint8, extra)
} }
if _, err := io.ReadFull(cr, t.Value); err != nil { if _, err := io.ReadFull(cr, t.Value[:]); err != nil {
return err return err
} }
@ -665,7 +660,7 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
} }
// t.Epoch (abi.ChainEpoch) (int64) // t.Epoch (abi.ChainEpoch) (int64)
if len("Epoch") > 8192 { if len("Epoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Epoch\" was too long") return xerrors.Errorf("Value in field \"Epoch\" was too long")
} }
@ -687,7 +682,7 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
} }
// t.Value (abi.InteractiveSealRandomness) (slice) // t.Value (abi.InteractiveSealRandomness) (slice)
if len("Value") > 8192 { if len("Value") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Value\" was too long") return xerrors.Errorf("Value in field \"Value\" was too long")
} }
@ -698,7 +693,7 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
return err return err
} }
if len(t.Value) > 2097152 { if len(t.Value) > cbg.ByteArrayMaxLen {
return xerrors.Errorf("Byte array in field t.Value was too long") return xerrors.Errorf("Byte array in field t.Value was too long")
} }
@ -706,10 +701,9 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
return err return err
} }
if _, err := cw.Write(t.Value); err != nil { if _, err := cw.Write(t.Value[:]); err != nil {
return err return err
} }
return nil return nil
} }
@ -742,7 +736,7 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) {
for i := uint64(0); i < n; i++ { for i := uint64(0); i < n; i++ {
{ {
sval, err := cbg.ReadStringWithMax(cr, 8192) sval, err := cbg.ReadString(cr)
if err != nil { if err != nil {
return err return err
} }
@ -755,10 +749,10 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) {
case "Epoch": case "Epoch":
{ {
maj, extra, err := cr.ReadHeader() maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil { if err != nil {
return err return err
} }
var extraI int64
switch maj { switch maj {
case cbg.MajUnsignedInt: case cbg.MajUnsignedInt:
extraI = int64(extra) extraI = int64(extra)
@ -785,7 +779,7 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) {
return err return err
} }
if extra > 2097152 { if extra > cbg.ByteArrayMaxLen {
return fmt.Errorf("t.Value: byte array too large (%d)", extra) return fmt.Errorf("t.Value: byte array too large (%d)", extra)
} }
if maj != cbg.MajByteString { if maj != cbg.MajByteString {
@ -796,7 +790,7 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) {
t.Value = make([]uint8, extra) t.Value = make([]uint8, extra)
} }
if _, err := io.ReadFull(cr, t.Value); err != nil { if _, err := io.ReadFull(cr, t.Value[:]); err != nil {
return err return err
} }
@ -808,6 +802,239 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) {
return nil return nil
} }
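
The SealTicket and SealSeed marshalers above share one shape: a map header, a bounded field-name string (the literal 8192 on one side of this diff, cbg.MaxLength on the other), and a bounded byte array for Value (2097152 vs cbg.ByteArrayMaxLen). A minimal round-trip sketch exercising the generated methods, assuming the github.com/filecoin-project/lotus/api import path for these types; the epoch and randomness values are placeholders:

package example

import (
    "bytes"
    "fmt"

    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/lotus/api"
)

// roundTripSealTicket marshals a SealTicket with the generated MarshalCBOR and
// reads it back with UnmarshalCBOR; the bounds above reject field names longer
// than 8192 bytes and Value slices longer than 2097152 bytes.
func roundTripSealTicket() error {
    in := api.SealTicket{
        Epoch: abi.ChainEpoch(1234),
        Value: abi.SealRandomness([]byte("ticket-randomness")),
    }

    var buf bytes.Buffer
    if err := in.MarshalCBOR(&buf); err != nil {
        return err
    }

    var out api.SealTicket
    if err := out.UnmarshalCBOR(&buf); err != nil {
        return err
    }

    fmt.Printf("epoch %d, %d randomness bytes\n", out.Epoch, len(out.Value))
    return nil
}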
func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write([]byte{165}); err != nil {
return err
}
// t.DealID (abi.DealID) (uint64)
if len("DealID") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealID\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil {
return err
}
if _, err := cw.WriteString(string("DealID")); err != nil {
return err
}
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil {
return err
}
// t.PublishCid (cid.Cid) (struct)
if len("PublishCid") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"PublishCid\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil {
return err
}
if _, err := cw.WriteString(string("PublishCid")); err != nil {
return err
}
if t.PublishCid == nil {
if _, err := cw.Write(cbg.CborNull); err != nil {
return err
}
} else {
if err := cbg.WriteCid(cw, *t.PublishCid); err != nil {
return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err)
}
}
// t.DealProposal (market.DealProposal) (struct)
if len("DealProposal") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealProposal\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil {
return err
}
if _, err := cw.WriteString(string("DealProposal")); err != nil {
return err
}
if err := t.DealProposal.MarshalCBOR(cw); err != nil {
return err
}
// t.DealSchedule (api.DealSchedule) (struct)
if len("DealSchedule") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealSchedule\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealSchedule"))); err != nil {
return err
}
if _, err := cw.WriteString(string("DealSchedule")); err != nil {
return err
}
if err := t.DealSchedule.MarshalCBOR(cw); err != nil {
return err
}
// t.KeepUnsealed (bool) (bool)
if len("KeepUnsealed") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil {
return err
}
if _, err := cw.WriteString(string("KeepUnsealed")); err != nil {
return err
}
if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil {
return err
}
return nil
}
func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) {
*t = PieceDealInfo{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajMap {
return fmt.Errorf("cbor input should be of type map")
}
if extra > cbg.MaxLength {
return fmt.Errorf("PieceDealInfo: map struct too large (%d)", extra)
}
var name string
n := extra
for i := uint64(0); i < n; i++ {
{
sval, err := cbg.ReadString(cr)
if err != nil {
return err
}
name = string(sval)
}
switch name {
// t.DealID (abi.DealID) (uint64)
case "DealID":
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.DealID = abi.DealID(extra)
}
// t.PublishCid (cid.Cid) (struct)
case "PublishCid":
{
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err)
}
t.PublishCid = &c
}
}
// t.DealProposal (market.DealProposal) (struct)
case "DealProposal":
{
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.DealProposal = new(market.DealProposal)
if err := t.DealProposal.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err)
}
}
}
// t.DealSchedule (api.DealSchedule) (struct)
case "DealSchedule":
{
if err := t.DealSchedule.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err)
}
}
// t.KeepUnsealed (bool) (bool)
case "KeepUnsealed":
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajOther {
return fmt.Errorf("booleans must be major type 7")
}
switch extra {
case 20:
t.KeepUnsealed = false
case 21:
t.KeepUnsealed = true
default:
return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
}
default:
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
return nil
}
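
PieceDealInfo is serialized as a CBOR map keyed by field name rather than as a tuple, which is why MarshalCBOR starts by writing the single byte 165 and UnmarshalCBOR loops over name/value pairs, skipping unknown keys. The header byte simply packs the major type and the entry count; a small sketch of that arithmetic (the constant names are illustrative):

// CBOR major type 5 (map) occupies the top three bits of the first byte and,
// for counts of 23 or less, the entry count occupies the low five bits.
const cborMajMap = 5

// pieceDealInfoHeader is the 0xa5 byte written above for the five fields
// DealID, PublishCid, DealProposal, DealSchedule and KeepUnsealed.
const pieceDealInfoHeader = byte(cborMajMap<<5 | 5) // == 165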
func (t *SectorPiece) MarshalCBOR(w io.Writer) error { func (t *SectorPiece) MarshalCBOR(w io.Writer) error {
if t == nil { if t == nil {
_, err := w.Write(cbg.CborNull) _, err := w.Write(cbg.CborNull)
@ -821,7 +1048,7 @@ func (t *SectorPiece) MarshalCBOR(w io.Writer) error {
} }
// t.Piece (abi.PieceInfo) (struct) // t.Piece (abi.PieceInfo) (struct)
if len("Piece") > 8192 { if len("Piece") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Piece\" was too long") return xerrors.Errorf("Value in field \"Piece\" was too long")
} }
@ -836,8 +1063,8 @@ func (t *SectorPiece) MarshalCBOR(w io.Writer) error {
return err return err
} }
// t.DealInfo (piece.PieceDealInfo) (struct) // t.DealInfo (api.PieceDealInfo) (struct)
if len("DealInfo") > 8192 { if len("DealInfo") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealInfo\" was too long") return xerrors.Errorf("Value in field \"DealInfo\" was too long")
} }
@ -883,7 +1110,7 @@ func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) {
for i := uint64(0); i < n; i++ { for i := uint64(0); i < n; i++ {
{ {
sval, err := cbg.ReadStringWithMax(cr, 8192) sval, err := cbg.ReadString(cr)
if err != nil { if err != nil {
return err return err
} }
@ -902,7 +1129,7 @@ func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) {
} }
} }
// t.DealInfo (piece.PieceDealInfo) (struct) // t.DealInfo (api.PieceDealInfo) (struct)
case "DealInfo": case "DealInfo":
{ {
@ -915,7 +1142,7 @@ func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) {
if err := cr.UnreadByte(); err != nil { if err := cr.UnreadByte(); err != nil {
return err return err
} }
t.DealInfo = new(piece.PieceDealInfo) t.DealInfo = new(PieceDealInfo)
if err := t.DealInfo.UnmarshalCBOR(cr); err != nil { if err := t.DealInfo.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.DealInfo pointer: %w", err) return xerrors.Errorf("unmarshaling t.DealInfo pointer: %w", err)
} }
@ -931,3 +1158,160 @@ func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) {
return nil return nil
} }
func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write([]byte{162}); err != nil {
return err
}
// t.EndEpoch (abi.ChainEpoch) (int64)
if len("EndEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"EndEpoch\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("EndEpoch"))); err != nil {
return err
}
if _, err := cw.WriteString(string("EndEpoch")); err != nil {
return err
}
if t.EndEpoch >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil {
return err
}
}
// t.StartEpoch (abi.ChainEpoch) (int64)
if len("StartEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"StartEpoch\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
return err
}
if _, err := cw.WriteString(string("StartEpoch")); err != nil {
return err
}
if t.StartEpoch >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil {
return err
}
}
return nil
}
func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) {
*t = DealSchedule{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajMap {
return fmt.Errorf("cbor input should be of type map")
}
if extra > cbg.MaxLength {
return fmt.Errorf("DealSchedule: map struct too large (%d)", extra)
}
var name string
n := extra
for i := uint64(0); i < n; i++ {
{
sval, err := cbg.ReadString(cr)
if err != nil {
return err
}
name = string(sval)
}
switch name {
// t.EndEpoch (abi.ChainEpoch) (int64)
case "EndEpoch":
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.EndEpoch = abi.ChainEpoch(extraI)
}
// t.StartEpoch (abi.ChainEpoch) (int64)
case "StartEpoch":
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.StartEpoch = abi.ChainEpoch(extraI)
}
default:
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
}
}
return nil
}
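
DealSchedule carries two abi.ChainEpoch fields, and CBOR has no signed-integer major type, so the generated code above writes non-negative epochs as major type 0 and negative epochs as major type 1 holding -1 - v, then reverses the mapping (with overflow checks) on read. A standalone sketch of that convention, with illustrative helper names:

package example

import "fmt"

// splitInt64 mirrors the encode side used for StartEpoch/EndEpoch above:
// major type 0 (unsigned) for v >= 0, major type 1 (negative) carrying -1-v.
func splitInt64(v int64) (major byte, extra uint64) {
    if v >= 0 {
        return 0, uint64(v)
    }
    return 1, uint64(-v - 1)
}

// joinInt64 mirrors the decode switch above, including its overflow checks.
func joinInt64(major byte, extra uint64) (int64, error) {
    switch major {
    case 0:
        if int64(extra) < 0 {
            return 0, fmt.Errorf("int64 positive overflow")
        }
        return int64(extra), nil
    case 1:
        if int64(extra) < 0 {
            return 0, fmt.Errorf("int64 negative overflow")
        }
        return -1 - int64(extra), nil
    default:
        return 0, fmt.Errorf("wrong type for int64 field: %d", major)
    }
}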
View File
@ -15,16 +15,6 @@ import (
"github.com/filecoin-project/lotus/lib/rpcenc" "github.com/filecoin-project/lotus/lib/rpcenc"
) )
// NewProviderRpc creates a new http jsonrpc client.
func NewProviderRpc(ctx context.Context, addr string, requestHeader http.Header) (api.LotusProvider, jsonrpc.ClientCloser, error) {
var res v1api.LotusProviderStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors))
return &res, closer, err
}
// NewCommonRPCV0 creates a new http jsonrpc client. // NewCommonRPCV0 creates a new http jsonrpc client.
func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) { func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) {
var res v0api.CommonNetStruct var res v0api.CommonNetStruct
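
With NewProviderRpc dropped on one side of this diff, the remaining constructors all follow the NewCommonRPCV0 pattern: jsonrpc.NewMergeClient fills the struct's Internal method table and hands back a closer the caller must invoke. A usage sketch, assuming the github.com/filecoin-project/lotus/api/client import path; the endpoint URL and empty header are placeholders:

package example

import (
    "context"
    "fmt"
    "net/http"

    "github.com/filecoin-project/lotus/api/client"
)

// dialCommon dials a node's v0 RPC endpoint and queries its version; in a real
// deployment the header would carry an Authorization bearer token.
func dialCommon(ctx context.Context) error {
    addr := "ws://127.0.0.1:1234/rpc/v0" // hypothetical local node
    capi, closer, err := client.NewCommonRPCV0(ctx, addr, http.Header{})
    if err != nil {
        return err
    }
    defer closer()

    v, err := capi.Version(ctx)
    if err != nil {
        return err
    }
    fmt.Println("remote API version:", v.APIVersion)
    return nil
}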
View File
@ -40,7 +40,6 @@ import (
apitypes "github.com/filecoin-project/lotus/api/types" apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/chain/types/ethtypes"
"github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/dtypes"
@ -153,14 +152,13 @@ func init() {
addExample(map[verifreg.ClaimId]verifreg.Claim{}) addExample(map[verifreg.ClaimId]verifreg.Claim{})
addExample(map[string]int{"name": 42}) addExample(map[string]int{"name": 42})
addExample(map[string]time.Time{"name": time.Unix(1615243938, 0).UTC()}) addExample(map[string]time.Time{"name": time.Unix(1615243938, 0).UTC()})
addExample(abi.ActorID(1000))
addExample(map[string]types.Actor{
"t01236": ExampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor),
})
addExample(&types.ExecutionTrace{ addExample(&types.ExecutionTrace{
Msg: ExampleValue("init", reflect.TypeOf(types.MessageTrace{}), nil).(types.MessageTrace), Msg: ExampleValue("init", reflect.TypeOf(types.MessageTrace{}), nil).(types.MessageTrace),
MsgRct: ExampleValue("init", reflect.TypeOf(types.ReturnTrace{}), nil).(types.ReturnTrace), MsgRct: ExampleValue("init", reflect.TypeOf(types.ReturnTrace{}), nil).(types.ReturnTrace),
}) })
addExample(map[string]types.Actor{
"t01236": ExampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor),
})
addExample(map[string]api.MarketDeal{ addExample(map[string]api.MarketDeal{
"t026363": ExampleValue("init", reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal), "t026363": ExampleValue("init", reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
}) })
@ -209,6 +207,7 @@ func init() {
si := uint64(12) si := uint64(12)
addExample(&si) addExample(&si)
addExample(retrievalmarket.DealID(5)) addExample(retrievalmarket.DealID(5))
addExample(abi.ActorID(1000))
addExample(map[string]cid.Cid{}) addExample(map[string]cid.Cid{})
addExample(map[string][]api.SealedRef{ addExample(map[string][]api.SealedRef{
"98000": { "98000": {
@ -407,32 +406,6 @@ func init() {
percent := types.Percent(123) percent := types.Percent(123)
addExample(percent) addExample(percent)
addExample(&percent) addExample(&percent)
addExample(&miner.PieceActivationManifest{
CID: c,
Size: 2032,
VerifiedAllocationKey: nil,
Notify: nil,
})
addExample(&types.ActorEventBlock{
Codec: 0x51,
Value: []byte("ddata"),
})
addExample(&types.ActorEventFilter{
Addresses: []address.Address{addr},
Fields: map[string][]types.ActorEventBlock{
"abc": {
{
Codec: 0x51,
Value: []byte("ddata"),
},
},
},
FromHeight: epochPtr(1010),
ToHeight: epochPtr(1020),
})
} }
func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) { func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {
@ -459,10 +432,6 @@ func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []r
i = &api.GatewayStruct{} i = &api.GatewayStruct{}
t = reflect.TypeOf(new(struct{ api.Gateway })).Elem() t = reflect.TypeOf(new(struct{ api.Gateway })).Elem()
permStruct = append(permStruct, reflect.TypeOf(api.GatewayStruct{}.Internal)) permStruct = append(permStruct, reflect.TypeOf(api.GatewayStruct{}.Internal))
case "Provider":
i = &api.LotusProviderStruct{}
t = reflect.TypeOf(new(struct{ api.LotusProvider })).Elem()
permStruct = append(permStruct, reflect.TypeOf(api.LotusProviderStruct{}.Internal))
default: default:
panic("unknown type") panic("unknown type")
} }
@ -538,11 +507,6 @@ func exampleStruct(method string, t, parent reflect.Type) interface{} {
return ns.Interface() return ns.Interface()
} }
func epochPtr(ei int64) *abi.ChainEpoch {
ep := abi.ChainEpoch(ei)
return &ep
}
type Visitor struct { type Visitor struct {
Root string Root string
Methods map[string]ast.Node Methods map[string]ast.Node
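
The init() block above populates a registry of example values keyed by reflect.Type, which ExampleValue consults when the documentation generator needs a sample argument or return value for an API method. A sketch of a lookup written as if it lived in the same package; the helper name is illustrative:

package docgen // sketch: same package as addExample/ExampleValue above

import (
    "fmt"
    "reflect"
)

// exampleFor prints whatever example value init() registered for a type,
// e.g. exampleFor(reflect.TypeOf(abi.ActorID(0))).
func exampleFor(t reflect.Type) {
    v := ExampleValue("DocGen", t, nil)
    fmt.Printf("%s example: %#v\n", t, v)
}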
View File
@ -1042,7 +1042,7 @@ func (mr *MockFullNodeMockRecorder) EthChainId(arg0 interface{}) *gomock.Call {
} }
// EthEstimateGas mocks base method. // EthEstimateGas mocks base method.
func (m *MockFullNode) EthEstimateGas(arg0 context.Context, arg1 jsonrpc.RawParams) (ethtypes.EthUint64, error) { func (m *MockFullNode) EthEstimateGas(arg0 context.Context, arg1 ethtypes.EthCall) (ethtypes.EthUint64, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EthEstimateGas", arg0, arg1) ret := m.ctrl.Call(m, "EthEstimateGas", arg0, arg1)
ret0, _ := ret[0].(ethtypes.EthUint64) ret0, _ := ret[0].(ethtypes.EthUint64)
@ -1626,21 +1626,6 @@ func (mr *MockFullNodeMockRecorder) GasEstimateMessageGas(arg0, arg1, arg2, arg3
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasEstimateMessageGas), arg0, arg1, arg2, arg3) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasEstimateMessageGas), arg0, arg1, arg2, arg3)
} }
// GetActorEventsRaw mocks base method.
func (m *MockFullNode) GetActorEventsRaw(arg0 context.Context, arg1 *types.ActorEventFilter) ([]*types.ActorEvent, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetActorEventsRaw", arg0, arg1)
ret0, _ := ret[0].([]*types.ActorEvent)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetActorEventsRaw indicates an expected call of GetActorEventsRaw.
func (mr *MockFullNodeMockRecorder) GetActorEventsRaw(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActorEventsRaw", reflect.TypeOf((*MockFullNode)(nil).GetActorEventsRaw), arg0, arg1)
}
// ID mocks base method. // ID mocks base method.
func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) { func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
@ -3203,36 +3188,6 @@ func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{})
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2)
} }
// StateGetAllAllocations mocks base method.
func (m *MockFullNode) StateGetAllAllocations(arg0 context.Context, arg1 types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateGetAllAllocations", arg0, arg1)
ret0, _ := ret[0].(map[verifreg.AllocationId]verifreg.Allocation)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateGetAllAllocations indicates an expected call of StateGetAllAllocations.
func (mr *MockFullNodeMockRecorder) StateGetAllAllocations(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllAllocations", reflect.TypeOf((*MockFullNode)(nil).StateGetAllAllocations), arg0, arg1)
}
// StateGetAllClaims mocks base method.
func (m *MockFullNode) StateGetAllClaims(arg0 context.Context, arg1 types.TipSetKey) (map[verifreg.ClaimId]verifreg.Claim, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateGetAllClaims", arg0, arg1)
ret0, _ := ret[0].(map[verifreg.ClaimId]verifreg.Claim)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateGetAllClaims indicates an expected call of StateGetAllClaims.
func (mr *MockFullNodeMockRecorder) StateGetAllClaims(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllClaims", reflect.TypeOf((*MockFullNode)(nil).StateGetAllClaims), arg0, arg1)
}
// StateGetAllocation mocks base method. // StateGetAllocation mocks base method.
func (m *MockFullNode) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) { func (m *MockFullNode) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
@ -3263,21 +3218,6 @@ func (mr *MockFullNodeMockRecorder) StateGetAllocationForPendingDeal(arg0, arg1,
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationForPendingDeal", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocationForPendingDeal), arg0, arg1, arg2) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationForPendingDeal", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocationForPendingDeal), arg0, arg1, arg2)
} }
// StateGetAllocationIdForPendingDeal mocks base method.
func (m *MockFullNode) StateGetAllocationIdForPendingDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (verifreg.AllocationId, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateGetAllocationIdForPendingDeal", arg0, arg1, arg2)
ret0, _ := ret[0].(verifreg.AllocationId)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateGetAllocationIdForPendingDeal indicates an expected call of StateGetAllocationIdForPendingDeal.
func (mr *MockFullNodeMockRecorder) StateGetAllocationIdForPendingDeal(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationIdForPendingDeal", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocationIdForPendingDeal), arg0, arg1, arg2)
}
// StateGetAllocations mocks base method. // StateGetAllocations mocks base method.
func (m *MockFullNode) StateGetAllocations(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) { func (m *MockFullNode) StateGetAllocations(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
@ -3983,21 +3923,6 @@ func (mr *MockFullNodeMockRecorder) StateWaitMsg(arg0, arg1, arg2, arg3, arg4 in
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsg", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsg), arg0, arg1, arg2, arg3, arg4) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsg", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsg), arg0, arg1, arg2, arg3, arg4)
} }
// SubscribeActorEventsRaw mocks base method.
func (m *MockFullNode) SubscribeActorEventsRaw(arg0 context.Context, arg1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SubscribeActorEventsRaw", arg0, arg1)
ret0, _ := ret[0].(<-chan *types.ActorEvent)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SubscribeActorEventsRaw indicates an expected call of SubscribeActorEventsRaw.
func (mr *MockFullNodeMockRecorder) SubscribeActorEventsRaw(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeActorEventsRaw", reflect.TypeOf((*MockFullNode)(nil).SubscribeActorEventsRaw), arg0, arg1)
}
// SyncCheckBad mocks base method. // SyncCheckBad mocks base method.
func (m *MockFullNode) SyncCheckBad(arg0 context.Context, arg1 cid.Cid) (string, error) { func (m *MockFullNode) SyncCheckBad(arg0 context.Context, arg1 cid.Cid) (string, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
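
MockFullNode is the gomock-generated double for the FullNode interface, so tests drive it through EXPECT() rather than a live node. A test-style sketch of stubbing the jsonrpc.RawParams flavour of EthEstimateGas shown on the left of this hunk; the gomock and lotus import paths are the usual ones and are an assumption here:

package example

import (
    "context"
    "testing"

    "github.com/golang/mock/gomock"

    "github.com/filecoin-project/lotus/api/mocks"
    "github.com/filecoin-project/lotus/chain/types/ethtypes"
)

// TestEthEstimateGasStub wires a canned gas estimate into the mock and calls
// it the way production code would call a FullNode.
func TestEthEstimateGasStub(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish()

    full := mocks.NewMockFullNode(ctrl)
    full.EXPECT().
        EthEstimateGas(gomock.Any(), gomock.Any()).
        Return(ethtypes.EthUint64(21000), nil)

    got, err := full.EthEstimateGas(context.Background(), nil)
    if err != nil || got != 21000 {
        t.Fatalf("unexpected estimate: %v, %v", got, err)
    }
}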
View File
@ -41,12 +41,6 @@ func PermissionedWorkerAPI(a Worker) Worker {
return &out return &out
} }
func PermissionedAPI[T, P any](a T) *P {
var out P
permissionedProxies(a, &out)
return &out
}
func PermissionedWalletAPI(a Wallet) Wallet { func PermissionedWalletAPI(a Wallet) Wallet {
var out WalletStruct var out WalletStruct
permissionedProxies(a, &out) permissionedProxies(a, &out)
View File
@ -35,13 +35,11 @@ import (
apitypes "github.com/filecoin-project/lotus/api/types" apitypes "github.com/filecoin-project/lotus/api/types"
builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/chain/types/ethtypes"
"github.com/filecoin-project/lotus/journal/alerting" "github.com/filecoin-project/lotus/journal/alerting"
"github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo/imports" "github.com/filecoin-project/lotus/node/repo/imports"
"github.com/filecoin-project/lotus/storage/pipeline/piece"
"github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/pipeline/sealiface"
"github.com/filecoin-project/lotus/storage/sealer/fsutil" "github.com/filecoin-project/lotus/storage/sealer/fsutil"
"github.com/filecoin-project/lotus/storage/sealer/sealtasks" "github.com/filecoin-project/lotus/storage/sealer/sealtasks"
@ -257,7 +255,7 @@ type FullNodeMethods struct {
EthChainId func(p0 context.Context) (ethtypes.EthUint64, error) `perm:"read"` EthChainId func(p0 context.Context) (ethtypes.EthUint64, error) `perm:"read"`
EthEstimateGas func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) `perm:"read"` EthEstimateGas func(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) `perm:"read"`
EthFeeHistory func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) `perm:"read"` EthFeeHistory func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) `perm:"read"`
@ -335,8 +333,6 @@ type FullNodeMethods struct {
GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `perm:"read"` GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `perm:"read"`
GetActorEventsRaw func(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) `perm:"read"`
MarketAddBalance func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"` MarketAddBalance func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"`
MarketGetReserved func(p0 context.Context, p1 address.Address) (types.BigInt, error) `perm:"sign"` MarketGetReserved func(p0 context.Context, p1 address.Address) (types.BigInt, error) `perm:"sign"`
@ -487,16 +483,10 @@ type FullNodeMethods struct {
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"` StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"`
StateGetAllAllocations func(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) `perm:"read"`
StateGetAllClaims func(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) `perm:"read"`
StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"` StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"`
StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"` StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"`
StateGetAllocationIdForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (verifreg.AllocationId, error) `perm:"read"`
StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) `perm:"read"` StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) `perm:"read"`
StateGetBeaconEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"` StateGetBeaconEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`
@ -591,8 +581,6 @@ type FullNodeMethods struct {
StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `perm:"read"` StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `perm:"read"`
SubscribeActorEventsRaw func(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) `perm:"read"`
SyncCheckBad func(p0 context.Context, p1 cid.Cid) (string, error) `perm:"read"` SyncCheckBad func(p0 context.Context, p1 cid.Cid) (string, error) `perm:"read"`
SyncCheckpoint func(p0 context.Context, p1 types.TipSetKey) error `perm:"admin"` SyncCheckpoint func(p0 context.Context, p1 types.TipSetKey) error `perm:"admin"`
@ -655,8 +643,6 @@ type GatewayMethods struct {
ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) `` ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) ``
ChainGetEvents func(p0 context.Context, p1 cid.Cid) ([]types.Event, error) ``
ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) `` ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) ``
ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `` ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) ``
@ -693,7 +679,7 @@ type GatewayMethods struct {
EthChainId func(p0 context.Context) (ethtypes.EthUint64, error) `` EthChainId func(p0 context.Context) (ethtypes.EthUint64, error) ``
EthEstimateGas func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) `` EthEstimateGas func(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) ``
EthFeeHistory func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) `` EthFeeHistory func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) ``
@ -761,8 +747,6 @@ type GatewayMethods struct {
GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `` GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``
GetActorEventsRaw func(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) ``
MinerGetBaseInfo func(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) `` MinerGetBaseInfo func(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) ``
MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) `` MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) ``
@ -837,8 +821,6 @@ type GatewayMethods struct {
StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `` StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) ``
SubscribeActorEventsRaw func(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) ``
Version func(p0 context.Context) (APIVersion, error) `` Version func(p0 context.Context) (APIVersion, error) ``
WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) `` WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) ``
@ -849,19 +831,6 @@ type GatewayMethods struct {
type GatewayStub struct { type GatewayStub struct {
} }
type LotusProviderStruct struct {
Internal LotusProviderMethods
}
type LotusProviderMethods struct {
Shutdown func(p0 context.Context) error `perm:"admin"`
Version func(p0 context.Context) (Version, error) `perm:"admin"`
}
type LotusProviderStub struct {
}
type NetStruct struct { type NetStruct struct {
Internal NetMethods Internal NetMethods
} }
@ -1103,7 +1072,7 @@ type StorageMinerMethods struct {
SectorAbortUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` SectorAbortUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 piece.PieceDealInfo) (SectorOffset, error) `perm:"admin"` SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"`
SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"` SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"`
@ -2165,14 +2134,14 @@ func (s *FullNodeStub) EthChainId(p0 context.Context) (ethtypes.EthUint64, error
return *new(ethtypes.EthUint64), ErrNotSupported return *new(ethtypes.EthUint64), ErrNotSupported
} }
func (s *FullNodeStruct) EthEstimateGas(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) { func (s *FullNodeStruct) EthEstimateGas(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) {
if s.Internal.EthEstimateGas == nil { if s.Internal.EthEstimateGas == nil {
return *new(ethtypes.EthUint64), ErrNotSupported return *new(ethtypes.EthUint64), ErrNotSupported
} }
return s.Internal.EthEstimateGas(p0, p1) return s.Internal.EthEstimateGas(p0, p1)
} }
func (s *FullNodeStub) EthEstimateGas(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) { func (s *FullNodeStub) EthEstimateGas(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) {
return *new(ethtypes.EthUint64), ErrNotSupported return *new(ethtypes.EthUint64), ErrNotSupported
} }
@ -2594,17 +2563,6 @@ func (s *FullNodeStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Messa
return nil, ErrNotSupported return nil, ErrNotSupported
} }
func (s *FullNodeStruct) GetActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) {
if s.Internal.GetActorEventsRaw == nil {
return *new([]*types.ActorEvent), ErrNotSupported
}
return s.Internal.GetActorEventsRaw(p0, p1)
}
func (s *FullNodeStub) GetActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) {
return *new([]*types.ActorEvent), ErrNotSupported
}
func (s *FullNodeStruct) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { func (s *FullNodeStruct) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) {
if s.Internal.MarketAddBalance == nil { if s.Internal.MarketAddBalance == nil {
return *new(cid.Cid), ErrNotSupported return *new(cid.Cid), ErrNotSupported
@ -3430,28 +3388,6 @@ func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2
return nil, ErrNotSupported return nil, ErrNotSupported
} }
func (s *FullNodeStruct) StateGetAllAllocations(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
if s.Internal.StateGetAllAllocations == nil {
return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
}
return s.Internal.StateGetAllAllocations(p0, p1)
}
func (s *FullNodeStub) StateGetAllAllocations(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
}
func (s *FullNodeStruct) StateGetAllClaims(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
if s.Internal.StateGetAllClaims == nil {
return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
}
return s.Internal.StateGetAllClaims(p0, p1)
}
func (s *FullNodeStub) StateGetAllClaims(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
}
func (s *FullNodeStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) { func (s *FullNodeStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
if s.Internal.StateGetAllocation == nil { if s.Internal.StateGetAllocation == nil {
return nil, ErrNotSupported return nil, ErrNotSupported
@ -3474,17 +3410,6 @@ func (s *FullNodeStub) StateGetAllocationForPendingDeal(p0 context.Context, p1 a
return nil, ErrNotSupported return nil, ErrNotSupported
} }
func (s *FullNodeStruct) StateGetAllocationIdForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (verifreg.AllocationId, error) {
if s.Internal.StateGetAllocationIdForPendingDeal == nil {
return *new(verifreg.AllocationId), ErrNotSupported
}
return s.Internal.StateGetAllocationIdForPendingDeal(p0, p1, p2)
}
func (s *FullNodeStub) StateGetAllocationIdForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (verifreg.AllocationId, error) {
return *new(verifreg.AllocationId), ErrNotSupported
}
func (s *FullNodeStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) { func (s *FullNodeStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
if s.Internal.StateGetAllocations == nil { if s.Internal.StateGetAllocations == nil {
return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
@ -4002,17 +3927,6 @@ func (s *FullNodeStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p
return nil, ErrNotSupported return nil, ErrNotSupported
} }
func (s *FullNodeStruct) SubscribeActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) {
if s.Internal.SubscribeActorEventsRaw == nil {
return nil, ErrNotSupported
}
return s.Internal.SubscribeActorEventsRaw(p0, p1)
}
func (s *FullNodeStub) SubscribeActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) {
return nil, ErrNotSupported
}
func (s *FullNodeStruct) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) { func (s *FullNodeStruct) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) {
if s.Internal.SyncCheckBad == nil { if s.Internal.SyncCheckBad == nil {
return "", ErrNotSupported return "", ErrNotSupported
@ -4288,17 +4202,6 @@ func (s *GatewayStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*Bl
return nil, ErrNotSupported return nil, ErrNotSupported
} }
func (s *GatewayStruct) ChainGetEvents(p0 context.Context, p1 cid.Cid) ([]types.Event, error) {
if s.Internal.ChainGetEvents == nil {
return *new([]types.Event), ErrNotSupported
}
return s.Internal.ChainGetEvents(p0, p1)
}
func (s *GatewayStub) ChainGetEvents(p0 context.Context, p1 cid.Cid) ([]types.Event, error) {
return *new([]types.Event), ErrNotSupported
}
func (s *GatewayStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { func (s *GatewayStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) {
if s.Internal.ChainGetGenesis == nil { if s.Internal.ChainGetGenesis == nil {
return nil, ErrNotSupported return nil, ErrNotSupported
@ -4497,14 +4400,14 @@ func (s *GatewayStub) EthChainId(p0 context.Context) (ethtypes.EthUint64, error)
return *new(ethtypes.EthUint64), ErrNotSupported return *new(ethtypes.EthUint64), ErrNotSupported
} }
func (s *GatewayStruct) EthEstimateGas(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) { func (s *GatewayStruct) EthEstimateGas(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) {
if s.Internal.EthEstimateGas == nil { if s.Internal.EthEstimateGas == nil {
return *new(ethtypes.EthUint64), ErrNotSupported return *new(ethtypes.EthUint64), ErrNotSupported
} }
return s.Internal.EthEstimateGas(p0, p1) return s.Internal.EthEstimateGas(p0, p1)
} }
func (s *GatewayStub) EthEstimateGas(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthUint64, error) { func (s *GatewayStub) EthEstimateGas(p0 context.Context, p1 ethtypes.EthCall) (ethtypes.EthUint64, error) {
return *new(ethtypes.EthUint64), ErrNotSupported return *new(ethtypes.EthUint64), ErrNotSupported
} }
@ -4871,17 +4774,6 @@ func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Messag
return nil, ErrNotSupported return nil, ErrNotSupported
} }
func (s *GatewayStruct) GetActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) {
if s.Internal.GetActorEventsRaw == nil {
return *new([]*types.ActorEvent), ErrNotSupported
}
return s.Internal.GetActorEventsRaw(p0, p1)
}
func (s *GatewayStub) GetActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) {
return *new([]*types.ActorEvent), ErrNotSupported
}
func (s *GatewayStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) { func (s *GatewayStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) {
if s.Internal.MinerGetBaseInfo == nil { if s.Internal.MinerGetBaseInfo == nil {
return nil, ErrNotSupported return nil, ErrNotSupported
@ -5289,17 +5181,6 @@ func (s *GatewayStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3
return nil, ErrNotSupported return nil, ErrNotSupported
} }
func (s *GatewayStruct) SubscribeActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) {
if s.Internal.SubscribeActorEventsRaw == nil {
return nil, ErrNotSupported
}
return s.Internal.SubscribeActorEventsRaw(p0, p1)
}
func (s *GatewayStub) SubscribeActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) {
return nil, ErrNotSupported
}
func (s *GatewayStruct) Version(p0 context.Context) (APIVersion, error) { func (s *GatewayStruct) Version(p0 context.Context) (APIVersion, error) {
if s.Internal.Version == nil { if s.Internal.Version == nil {
return *new(APIVersion), ErrNotSupported return *new(APIVersion), ErrNotSupported
@ -5333,28 +5214,6 @@ func (s *GatewayStub) Web3ClientVersion(p0 context.Context) (string, error) {
return "", ErrNotSupported return "", ErrNotSupported
} }
func (s *LotusProviderStruct) Shutdown(p0 context.Context) error {
if s.Internal.Shutdown == nil {
return ErrNotSupported
}
return s.Internal.Shutdown(p0)
}
func (s *LotusProviderStub) Shutdown(p0 context.Context) error {
return ErrNotSupported
}
func (s *LotusProviderStruct) Version(p0 context.Context) (Version, error) {
if s.Internal.Version == nil {
return *new(Version), ErrNotSupported
}
return s.Internal.Version(p0)
}
func (s *LotusProviderStub) Version(p0 context.Context) (Version, error) {
return *new(Version), ErrNotSupported
}
func (s *NetStruct) ID(p0 context.Context) (peer.ID, error) { func (s *NetStruct) ID(p0 context.Context) (peer.ID, error) {
if s.Internal.ID == nil { if s.Internal.ID == nil {
return *new(peer.ID), ErrNotSupported return *new(peer.ID), ErrNotSupported
@ -6532,14 +6391,14 @@ func (s *StorageMinerStub) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorN
return ErrNotSupported return ErrNotSupported
} }
func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 piece.PieceDealInfo) (SectorOffset, error) { func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 PieceDealInfo) (SectorOffset, error) {
if s.Internal.SectorAddPieceToAny == nil { if s.Internal.SectorAddPieceToAny == nil {
return *new(SectorOffset), ErrNotSupported return *new(SectorOffset), ErrNotSupported
} }
return s.Internal.SectorAddPieceToAny(p0, p1, p2, p3) return s.Internal.SectorAddPieceToAny(p0, p1, p2, p3)
} }
func (s *StorageMinerStub) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 piece.PieceDealInfo) (SectorOffset, error) { func (s *StorageMinerStub) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 PieceDealInfo) (SectorOffset, error) {
return *new(SectorOffset), ErrNotSupported return *new(SectorOffset), ErrNotSupported
} }
@ -7583,7 +7442,6 @@ var _ CommonNet = new(CommonNetStruct)
var _ EthSubscriber = new(EthSubscriberStruct) var _ EthSubscriber = new(EthSubscriberStruct)
var _ FullNode = new(FullNodeStruct) var _ FullNode = new(FullNodeStruct)
var _ Gateway = new(GatewayStruct) var _ Gateway = new(GatewayStruct)
var _ LotusProvider = new(LotusProviderStruct)
var _ Net = new(NetStruct) var _ Net = new(NetStruct)
var _ Signable = new(SignableStruct) var _ Signable = new(SignableStruct)
var _ StorageMiner = new(StorageMinerStruct) var _ StorageMiner = new(StorageMinerStruct)
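
Every API in this file is wired through the same generated pattern: a Struct type whose Internal field is a table of function pointers tagged with the required permission, plus a Stub that declines everything. A condensed, hypothetical one-method version of that pattern for illustration:

package example

import (
    "context"
    "errors"
)

var ErrNotSupported = errors.New("method not supported")

type Pinger interface {
    Ping(ctx context.Context) (string, error)
}

// PingerStruct plays the role of the generated *Struct types above: callers
// hit the method, which proxies through the Internal function table.
type PingerStruct struct {
    Internal struct {
        Ping func(ctx context.Context) (string, error) `perm:"read"`
    }
}

func (s *PingerStruct) Ping(ctx context.Context) (string, error) {
    if s.Internal.Ping == nil {
        return "", ErrNotSupported
    }
    return s.Internal.Ping(ctx)
}

// PingerStub plays the role of the generated *Stub types: every call declines.
type PingerStub struct{}

func (s *PingerStub) Ping(ctx context.Context) (string, error) {
    return "", ErrNotSupported
}

var _ Pinger = new(PingerStruct)
var _ Pinger = new(PingerStub)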
View File
@ -56,17 +56,9 @@ type PubsubScore struct {
Score *pubsub.PeerScoreSnapshot Score *pubsub.PeerScoreSnapshot
} }
// MessageSendSpec contains optional fields which modify message sending behavior
type MessageSendSpec struct { type MessageSendSpec struct {
// MaxFee specifies a cap on network fees related to this message MaxFee abi.TokenAmount
MaxFee abi.TokenAmount
// MsgUuid specifies a unique message identifier which can be used on node (or node cluster)
// level to prevent double-sends of messages even when nonce generation is not handled by sender
MsgUuid uuid.UUID MsgUuid uuid.UUID
// MaximizeFeeCap makes message FeeCap be based entirely on MaxFee
MaximizeFeeCap bool
} }
type MpoolMessageWhole struct { type MpoolMessageWhole struct {
@ -349,8 +341,6 @@ type ForkUpgradeParams struct {
UpgradeLightningHeight abi.ChainEpoch UpgradeLightningHeight abi.ChainEpoch
UpgradeThunderHeight abi.ChainEpoch UpgradeThunderHeight abi.ChainEpoch
UpgradeWatermelonHeight abi.ChainEpoch UpgradeWatermelonHeight abi.ChainEpoch
UpgradeDragonHeight abi.ChainEpoch
UpgradePhoenixHeight abi.ChainEpoch
} }
type NonceMapType map[address.Address]uint64 type NonceMapType map[address.Address]uint64
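
MessageSendSpec is the optional knob bag passed to MpoolPushMessage: the right-hand side of this hunk carries MaxFee and MsgUuid, while the left additionally documents the fields and adds MaximizeFeeCap. A sketch of filling it in when pushing a message, assuming the usual lotus helpers (types.FromFil, google/uuid); the 1 FIL cap is an arbitrary example:

package example

import (
    "context"

    "github.com/google/uuid"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/types"
)

// pushWithSpec caps the network fee for one message and tags it with a UUID
// so a node (or node cluster) can deduplicate retried sends.
func pushWithSpec(ctx context.Context, node api.FullNode, msg *types.Message) (*types.SignedMessage, error) {
    spec := &api.MessageSendSpec{
        MaxFee:  types.FromFil(1), // cap total network fees for this message at 1 FIL
        MsgUuid: uuid.New(),       // one UUID per logical send
    }
    return node.MpoolPushMessage(ctx, msg, spec)
}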
View File
@ -537,14 +537,10 @@ type FullNode interface {
StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read
// StateGetAllocations returns all the allocations for a given client. // StateGetAllocations returns all the allocations for a given client.
StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read
// StateGetAllAllocations returns all the allocations available in the verified registry actor.
StateGetAllAllocations(ctx context.Context, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read
// StateGetClaim returns the claim for a given address and claim ID. // StateGetClaim returns the claim for a given address and claim ID.
StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) //perm:read StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) //perm:read
// StateGetClaims returns all the claims for a given provider. // StateGetClaims returns all the claims for a given provider.
StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read
// StateGetAllClaims returns all the claims available in the verified registry actor.
StateGetAllClaims(ctx context.Context, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read
// StateLookupID retrieves the ID address of the given address // StateLookupID retrieves the ID address of the given address
StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
// StateAccountKey returns the public key address of the given ID address // StateAccountKey returns the public key address of the given ID address
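
The calls kept in this interface are plain read-only lookups against the verified-registry actor. A sketch of enumerating a client's allocations at the current head through the v0 API; the printed Allocation fields are an assumption about its layout:

package example

import (
    "context"
    "fmt"

    "github.com/filecoin-project/go-address"

    "github.com/filecoin-project/lotus/api/v0api"
    "github.com/filecoin-project/lotus/chain/types"
)

// listClientAllocations walks the allocations a client currently holds;
// types.EmptyTSK asks the node to evaluate the lookup at its current head.
func listClientAllocations(ctx context.Context, node v0api.FullNode, client address.Address) error {
    allocs, err := node.StateGetAllocations(ctx, client, types.EmptyTSK)
    if err != nil {
        return err
    }
    for id, a := range allocs {
        fmt.Printf("allocation %d: provider %d, piece size %d\n", id, a.Provider, a.Size)
    }
    return nil
}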
View File
@ -280,10 +280,6 @@ type FullNodeMethods struct {
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"` StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"`
StateGetAllAllocations func(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) `perm:"read"`
StateGetAllClaims func(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) `perm:"read"`
StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"` StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"`
StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"` StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"`
@ -1841,28 +1837,6 @@ func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2
return nil, ErrNotSupported return nil, ErrNotSupported
} }
func (s *FullNodeStruct) StateGetAllAllocations(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
if s.Internal.StateGetAllAllocations == nil {
return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
}
return s.Internal.StateGetAllAllocations(p0, p1)
}
func (s *FullNodeStub) StateGetAllAllocations(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
}
func (s *FullNodeStruct) StateGetAllClaims(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
if s.Internal.StateGetAllClaims == nil {
return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
}
return s.Internal.StateGetAllClaims(p0, p1)
}
func (s *FullNodeStub) StateGetAllClaims(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
}
func (s *FullNodeStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) { func (s *FullNodeStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
if s.Internal.StateGetAllocation == nil { if s.Internal.StateGetAllocation == nil {
return nil, ErrNotSupported return nil, ErrNotSupported
View File
@ -2338,36 +2338,6 @@ func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{})
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2)
} }
// StateGetAllAllocations mocks base method.
func (m *MockFullNode) StateGetAllAllocations(arg0 context.Context, arg1 types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateGetAllAllocations", arg0, arg1)
ret0, _ := ret[0].(map[verifreg.AllocationId]verifreg.Allocation)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateGetAllAllocations indicates an expected call of StateGetAllAllocations.
func (mr *MockFullNodeMockRecorder) StateGetAllAllocations(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllAllocations", reflect.TypeOf((*MockFullNode)(nil).StateGetAllAllocations), arg0, arg1)
}
// StateGetAllClaims mocks base method.
func (m *MockFullNode) StateGetAllClaims(arg0 context.Context, arg1 types.TipSetKey) (map[verifreg.ClaimId]verifreg.Claim, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateGetAllClaims", arg0, arg1)
ret0, _ := ret[0].(map[verifreg.ClaimId]verifreg.Claim)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// StateGetAllClaims indicates an expected call of StateGetAllClaims.
func (mr *MockFullNodeMockRecorder) StateGetAllClaims(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllClaims", reflect.TypeOf((*MockFullNode)(nil).StateGetAllClaims), arg0, arg1)
}
// StateGetAllocation mocks base method. // StateGetAllocation mocks base method.
func (m *MockFullNode) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) { func (m *MockFullNode) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()


@ -12,5 +12,3 @@ type RawFullNodeAPI FullNode
func PermissionedFullAPI(a FullNode) FullNode { func PermissionedFullAPI(a FullNode) FullNode {
return api.PermissionedFullAPI(a) return api.PermissionedFullAPI(a)
} }
type LotusProviderStruct = api.LotusProviderStruct

View File

@ -59,8 +59,6 @@ var (
MinerAPIVersion0 = newVer(1, 5, 0) MinerAPIVersion0 = newVer(1, 5, 0)
WorkerAPIVersion0 = newVer(1, 7, 0) WorkerAPIVersion0 = newVer(1, 7, 0)
ProviderAPIVersion0 = newVer(1, 0, 0)
) )
//nolint:varcheck,deadcode //nolint:varcheck,deadcode


@ -44,7 +44,7 @@ func (t *NetRpcReq) MarshalCBOR(w io.Writer) error {
} }
// t.Cid ([]cid.Cid) (slice) // t.Cid ([]cid.Cid) (slice)
if len(t.Cid) > 8192 { if len(t.Cid) > cbg.MaxLength {
return xerrors.Errorf("Slice value in field t.Cid was too long") return xerrors.Errorf("Slice value in field t.Cid was too long")
} }
@ -60,7 +60,7 @@ func (t *NetRpcReq) MarshalCBOR(w io.Writer) error {
} }
// t.Data ([][]uint8) (slice) // t.Data ([][]uint8) (slice)
if len(t.Data) > 8192 { if len(t.Data) > cbg.MaxLength {
return xerrors.Errorf("Slice value in field t.Data was too long") return xerrors.Errorf("Slice value in field t.Data was too long")
} }
@ -68,7 +68,7 @@ func (t *NetRpcReq) MarshalCBOR(w io.Writer) error {
return err return err
} }
for _, v := range t.Data { for _, v := range t.Data {
if len(v) > 2097152 { if len(v) > cbg.ByteArrayMaxLen {
return xerrors.Errorf("Byte array in field v was too long") return xerrors.Errorf("Byte array in field v was too long")
} }
@ -76,10 +76,9 @@ func (t *NetRpcReq) MarshalCBOR(w io.Writer) error {
return err return err
} }
if _, err := cw.Write(v); err != nil { if _, err := cw.Write(v[:]); err != nil {
return err return err
} }
} }
return nil return nil
} }
@ -141,7 +140,7 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) {
return err return err
} }
if extra > 8192 { if extra > cbg.MaxLength {
return fmt.Errorf("t.Cid: array too large (%d)", extra) return fmt.Errorf("t.Cid: array too large (%d)", extra)
} }
@ -172,9 +171,9 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) {
t.Cid[i] = c t.Cid[i] = c
} }
} }
} }
// t.Data ([][]uint8) (slice) // t.Data ([][]uint8) (slice)
maj, extra, err = cr.ReadHeader() maj, extra, err = cr.ReadHeader()
@ -182,7 +181,7 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) {
return err return err
} }
if extra > 8192 { if extra > cbg.MaxLength {
return fmt.Errorf("t.Data: array too large (%d)", extra) return fmt.Errorf("t.Data: array too large (%d)", extra)
} }
@ -208,7 +207,7 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) {
return err return err
} }
if extra > 2097152 { if extra > cbg.ByteArrayMaxLen {
return fmt.Errorf("t.Data[i]: byte array too large (%d)", extra) return fmt.Errorf("t.Data[i]: byte array too large (%d)", extra)
} }
if maj != cbg.MajByteString { if maj != cbg.MajByteString {
@ -219,12 +218,12 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) {
t.Data[i] = make([]uint8, extra) t.Data[i] = make([]uint8, extra)
} }
if _, err := io.ReadFull(cr, t.Data[i]); err != nil { if _, err := io.ReadFull(cr, t.Data[i][:]); err != nil {
return err return err
} }
} }
} }
return nil return nil
} }
@ -254,7 +253,7 @@ func (t *NetRpcResp) MarshalCBOR(w io.Writer) error {
} }
// t.Data ([]uint8) (slice) // t.Data ([]uint8) (slice)
if len(t.Data) > 2097152 { if len(t.Data) > cbg.ByteArrayMaxLen {
return xerrors.Errorf("Byte array in field t.Data was too long") return xerrors.Errorf("Byte array in field t.Data was too long")
} }
@ -262,10 +261,9 @@ func (t *NetRpcResp) MarshalCBOR(w io.Writer) error {
return err return err
} }
if _, err := cw.Write(t.Data); err != nil { if _, err := cw.Write(t.Data[:]); err != nil {
return err return err
} }
return nil return nil
} }
@ -326,7 +324,7 @@ func (t *NetRpcResp) UnmarshalCBOR(r io.Reader) (err error) {
return err return err
} }
if extra > 2097152 { if extra > cbg.ByteArrayMaxLen {
return fmt.Errorf("t.Data: byte array too large (%d)", extra) return fmt.Errorf("t.Data: byte array too large (%d)", extra)
} }
if maj != cbg.MajByteString { if maj != cbg.MajByteString {
@ -337,10 +335,9 @@ func (t *NetRpcResp) UnmarshalCBOR(r io.Reader) (err error) {
t.Data = make([]uint8, extra) t.Data = make([]uint8, extra)
} }
if _, err := io.ReadFull(cr, t.Data); err != nil { if _, err := io.ReadFull(cr, t.Data[:]); err != nil {
return err return err
} }
return nil return nil
} }
@ -364,7 +361,7 @@ func (t *NetRpcErr) MarshalCBOR(w io.Writer) error {
} }
// t.Msg (string) (string) // t.Msg (string) (string)
if len(t.Msg) > 8192 { if len(t.Msg) > cbg.MaxLength {
return xerrors.Errorf("Value in field t.Msg was too long") return xerrors.Errorf("Value in field t.Msg was too long")
} }
@ -429,7 +426,7 @@ func (t *NetRpcErr) UnmarshalCBOR(r io.Reader) (err error) {
// t.Msg (string) (string) // t.Msg (string) (string)
{ {
sval, err := cbg.ReadStringWithMax(cr, 8192) sval, err := cbg.ReadString(cr)
if err != nil { if err != nil {
return err return err
} }
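The CBOR hunks above differ only in how the bounds are expressed: one side hard-codes the limits 8192 and 2097152, the other references cbg.MaxLength and cbg.ByteArrayMaxLen, which carry those same default values in cbor-gen. The guard itself is a simple check-before-write; a minimal sketch of that pattern, independent of cbor-gen:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

const (
	maxSliceLen     = 8192    // matches cbg.MaxLength, the per-slice element limit
	maxByteArrayLen = 2097152 // matches cbg.ByteArrayMaxLen (2 << 20)
)

// writeBounded refuses to serialize a byte array that exceeds the agreed limit,
// mirroring the length guards in the generated MarshalCBOR methods above.
func writeBounded(w io.Writer, data []byte) error {
	if len(data) > maxByteArrayLen {
		return fmt.Errorf("byte array too long: %d > %d", len(data), maxByteArrayLen)
	}
	_, err := w.Write(data)
	return err
}

func main() {
	var buf bytes.Buffer
	fmt.Println(writeBounded(&buf, []byte("ok")))                    // <nil>
	fmt.Println(writeBounded(&buf, make([]byte, maxByteArrayLen+1))) // byte array too long
}
```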


@ -183,17 +183,3 @@ func (b *idstore) Close() error {
func (b *idstore) Flush(ctx context.Context) error { func (b *idstore) Flush(ctx context.Context) error {
return b.bs.Flush(ctx) return b.bs.Flush(ctx)
} }
func (b *idstore) CollectGarbage(ctx context.Context, options ...BlockstoreGCOption) error {
if bs, ok := b.bs.(BlockstoreGC); ok {
return bs.CollectGarbage(ctx, options...)
}
return xerrors.Errorf("not supported")
}
func (b *idstore) GCOnce(ctx context.Context, options ...BlockstoreGCOption) error {
if bs, ok := b.bs.(BlockstoreGCOnce); ok {
return bs.GCOnce(ctx, options...)
}
return xerrors.Errorf("not supported")
}
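The idstore hunk above uses an optional-capability pattern: the wrapper exposes garbage collection only if the wrapped blockstore happens to implement the corresponding interface, and reports "not supported" otherwise. A simplified, runnable sketch of that pattern (local stand-in types, not the Lotus blockstore interfaces):

```go
package main

import (
	"context"
	"fmt"
)

// Blockstore is a pared-down stand-in for the wrapped store.
type Blockstore interface {
	Flush(ctx context.Context) error
}

// BlockstoreGC is an optional capability that only some implementations provide.
type BlockstoreGC interface {
	CollectGarbage(ctx context.Context) error
}

type idstore struct {
	bs Blockstore
}

// CollectGarbage delegates to the inner store only when it supports GC.
func (b *idstore) CollectGarbage(ctx context.Context) error {
	if gc, ok := b.bs.(BlockstoreGC); ok {
		return gc.CollectGarbage(ctx)
	}
	return fmt.Errorf("not supported")
}

type plainStore struct{}

func (plainStore) Flush(ctx context.Context) error { return nil }

func main() {
	s := &idstore{bs: plainStore{}}
	fmt.Println(s.CollectGarbage(context.Background())) // not supported
}
```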


@ -5,7 +5,9 @@ import (
"context" "context"
"io" "io"
"github.com/ipfs/boxo/path" iface "github.com/ipfs/boxo/coreiface"
"github.com/ipfs/boxo/coreiface/options"
"github.com/ipfs/boxo/coreiface/path"
blocks "github.com/ipfs/go-block-format" blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multiaddr"
@ -13,8 +15,6 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
rpc "github.com/filecoin-project/kubo-api-client" rpc "github.com/filecoin-project/kubo-api-client"
iface "github.com/filecoin-project/kubo-api-client/coreiface"
"github.com/filecoin-project/kubo-api-client/coreiface/options"
) )
type IPFSBlockstore struct { type IPFSBlockstore struct {
@ -83,7 +83,7 @@ func (i *IPFSBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error {
} }
func (i *IPFSBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { func (i *IPFSBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
_, err := i.offlineAPI.Block().Stat(ctx, path.FromCid(cid)) _, err := i.offlineAPI.Block().Stat(ctx, path.IpldPath(cid))
if err != nil { if err != nil {
// The underlying client is running in Offline mode. // The underlying client is running in Offline mode.
// Stat() will fail with an err if the block isn't in the // Stat() will fail with an err if the block isn't in the
@ -99,7 +99,7 @@ func (i *IPFSBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
} }
func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
rd, err := i.api.Block().Get(ctx, path.FromCid(cid)) rd, err := i.api.Block().Get(ctx, path.IpldPath(cid))
if err != nil { if err != nil {
return nil, xerrors.Errorf("getting ipfs block: %w", err) return nil, xerrors.Errorf("getting ipfs block: %w", err)
} }
@ -113,7 +113,7 @@ func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, er
} }
func (i *IPFSBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { func (i *IPFSBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
st, err := i.api.Block().Stat(ctx, path.FromCid(cid)) st, err := i.api.Block().Stat(ctx, path.IpldPath(cid))
if err != nil { if err != nil {
return 0, xerrors.Errorf("getting ipfs block: %w", err) return 0, xerrors.Errorf("getting ipfs block: %w", err)
} }


@ -182,6 +182,7 @@ type SplitStore struct {
compactionIndex int64 compactionIndex int64
pruneIndex int64 pruneIndex int64
onlineGCCnt int64
ctx context.Context ctx context.Context
cancel func() cancel func()


@ -66,9 +66,8 @@ var (
) )
const ( const (
batchSize = 16384 batchSize = 16384
cidKeySize = 128 cidKeySize = 128
purgeWorkSliceDuration = time.Second
) )
func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
@ -1373,21 +1372,9 @@ func (s *SplitStore) purge(coldr *ColdSetReader, checkpoint *Checkpoint, markSet
return err return err
} }
now := time.Now()
err := coldr.ForEach(func(c cid.Cid) error { err := coldr.ForEach(func(c cid.Cid) error {
batch = append(batch, c) batch = append(batch, c)
if len(batch) == batchSize { if len(batch) == batchSize {
// add some time slicing to the purge as this is a very disk I/O heavy operation that
// requires write access to txnLk that may starve other operations that require
// access to the blockstore.
elapsed := time.Since(now)
if elapsed > purgeWorkSliceDuration {
// work 1 slice, sleep 4 slices, or 20% utilization
time.Sleep(4 * elapsed)
now = time.Now()
}
return deleteBatch() return deleteBatch()
} }


@ -3,9 +3,9 @@ package splitstore
import ( import (
"context" "context"
"crypto/rand"
"errors" "errors"
"fmt" "fmt"
"math/rand"
"sync" "sync"
"sync/atomic" "sync/atomic"
"testing" "testing"

Binary file not shown.


@ -1,4 +1,4 @@
/dns4/calibration.node.glif.io/tcp/1237/p2p/12D3KooWQPYouEAsUQKzvFUA9sQ8tz4rfpqtTzh2eL6USd9bwg7x /dns4/bootstrap-0.calibration.fildev.network/tcp/1347/p2p/12D3KooWCi2w8U4DDB9xqrejb5KYHaQv2iA2AJJ6uzG3iQxNLBMy
/dns4/bootstrap-calibnet-0.chainsafe-fil.io/tcp/34000/p2p/12D3KooWABQ5gTDHPWyvhJM7jPhtNwNJruzTEo32Lo4gcS5ABAMm /dns4/bootstrap-1.calibration.fildev.network/tcp/1347/p2p/12D3KooWDTayrBojBn9jWNNUih4nNQQBGJD7Zo3gQCKgBkUsS6dp
/dns4/bootstrap-calibnet-1.chainsafe-fil.io/tcp/34000/p2p/12D3KooWS3ZRhMYL67b4bD5XQ6fcpTyVQXnDe8H89LvwrDqaSbiT /dns4/bootstrap-2.calibration.fildev.network/tcp/1347/p2p/12D3KooWNRxTHUn8bf7jz1KEUPMc2dMgGfa4f8ZJTsquVSn3vHCG
/dns4/bootstrap-calibnet-2.chainsafe-fil.io/tcp/34000/p2p/12D3KooWEiBN8jBX8EBoM3M47pVRLRWV812gDRUJhMxgyVkUoR48 /dns4/bootstrap-3.calibration.fildev.network/tcp/1347/p2p/12D3KooWFWUqE9jgXvcKHWieYs9nhyp6NF4ftwLGAHm4sCv73jjK


@ -1,9 +1,16 @@
/dns4/bootstrap-0.mainnet.filops.net/tcp/1347/p2p/12D3KooWCVe8MmsEMes2FzgTpt9fXtmCY7wrq91GRiaC8PHSCCBj
/dns4/bootstrap-1.mainnet.filops.net/tcp/1347/p2p/12D3KooWCwevHg1yLCvktf2nvLu7L9894mcrJR4MsBCcm4syShVc
/dns4/bootstrap-2.mainnet.filops.net/tcp/1347/p2p/12D3KooWEWVwHGn2yR36gKLozmb4YjDJGerotAPGxmdWZx2nxMC4
/dns4/bootstrap-3.mainnet.filops.net/tcp/1347/p2p/12D3KooWKhgq8c7NQ9iGjbyK7v7phXvG6492HQfiDaGHLHLQjk7R
/dns4/bootstrap-4.mainnet.filops.net/tcp/1347/p2p/12D3KooWL6PsFNPhYftrJzGgF5U18hFoaVhfGk7xwzD8yVrHJ3Uc
/dns4/bootstrap-5.mainnet.filops.net/tcp/1347/p2p/12D3KooWLFynvDQiUpXoHroV1YxKHhPJgysQGH2k3ZGwtWzR4dFH
/dns4/bootstrap-6.mainnet.filops.net/tcp/1347/p2p/12D3KooWP5MwCiqdMETF9ub1P3MbCvQCcfconnYHbWg6sUJcDRQQ
/dns4/bootstrap-7.mainnet.filops.net/tcp/1347/p2p/12D3KooWRs3aY1p3juFjPy8gPN95PEQChm2QKGUCAdcDCC4EBMKf
/dns4/bootstrap-8.mainnet.filops.net/tcp/1347/p2p/12D3KooWScFR7385LTyR4zU1bYdzSiiAb5rnNABfVahPvVSzyTkR
/dns4/lotus-bootstrap.ipfsforce.com/tcp/41778/p2p/12D3KooWGhufNmZHF3sv48aQeS13ng5XVJZ9E6qy2Ms4VzqeUsHk /dns4/lotus-bootstrap.ipfsforce.com/tcp/41778/p2p/12D3KooWGhufNmZHF3sv48aQeS13ng5XVJZ9E6qy2Ms4VzqeUsHk
/dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz /dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz
/dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u /dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u
/dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt /dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt
/dns4/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWRzCVDwHUkgdK7eRgnoXbjDAELhxPErjHzbRLguSV1aRt /dns4/bootstrap-0.ipfsmain.cn/tcp/34721/p2p/12D3KooWQnwEGNqcM2nAcPtRR9rAX8Hrg4k9kJLCHoTR5chJfz6d
/dns4/bootstrap-venus.mainnet.filincubator.com/tcp/8888/p2p/QmQu8C6deXwKvJP2D8B6QGyhngc3ZiDnFzEHBDx8yeBXST /dns4/bootstrap-1.ipfsmain.cn/tcp/34723/p2p/12D3KooWMKxMkD5DMpSWsW7dBddKxKT7L2GgbNuckz9otxvkvByP
/dns4/bootstrap-mainnet-0.chainsafe-fil.io/tcp/34000/p2p/12D3KooWKKkCZbcigsWTEu1cgNetNbZJqeNtysRtFpq7DTqw3eqH /dns4/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWRzCVDwHUkgdK7eRgnoXbjDAELhxPErjHzbRLguSV1aRt
/dns4/bootstrap-mainnet-1.chainsafe-fil.io/tcp/34000/p2p/12D3KooWGnkd9GQKo3apkShQDaq1d6cKJJmsVe6KiQkacUk1T8oZ
/dns4/bootstrap-mainnet-2.chainsafe-fil.io/tcp/34000/p2p/12D3KooWHQRSDFv4FvAjtU32shQ7znz7oRbLBryXzZ9NMK2feyyH


@ -48,7 +48,6 @@ func init() {
if NetworkBundle == "calibrationnet" { if NetworkBundle == "calibrationnet" {
actors.AddActorMeta("storageminer", cid.MustParse("bafk2bzacecnh2ouohmonvebq7uughh4h3ppmg4cjsk74dzxlbbtlcij4xbzxq"), actorstypes.Version12) actors.AddActorMeta("storageminer", cid.MustParse("bafk2bzacecnh2ouohmonvebq7uughh4h3ppmg4cjsk74dzxlbbtlcij4xbzxq"), actorstypes.Version12)
actors.AddActorMeta("storageminer", cid.MustParse("bafk2bzaced7emkbbnrewv5uvrokxpf5tlm4jslu2jsv77ofw2yqdglg657uie"), actorstypes.Version12) actors.AddActorMeta("storageminer", cid.MustParse("bafk2bzaced7emkbbnrewv5uvrokxpf5tlm4jslu2jsv77ofw2yqdglg657uie"), actorstypes.Version12)
actors.AddActorMeta("verifiedregistry", cid.MustParse("bafk2bzacednskl3bykz5qpo54z2j2p4q44t5of4ktd6vs6ymmg2zebsbxazkm"), actorstypes.Version13)
} }
} }
@ -195,8 +194,7 @@ func readEmbeddedBuiltinActorsMetadata(bundle string) ([]*BuiltinActorsMetadata,
// The following manifest cids existed temporarily on the calibnet testnet // The following manifest cids existed temporarily on the calibnet testnet
// We include them in our builtin bundle, but intentionally omit from metadata // We include them in our builtin bundle, but intentionally omit from metadata
if root == cid.MustParse("bafy2bzacedrunxfqta5skb7q7x32lnp4efz2oq7fn226ffm7fu5iqs62jkmvs") || if root == cid.MustParse("bafy2bzacedrunxfqta5skb7q7x32lnp4efz2oq7fn226ffm7fu5iqs62jkmvs") ||
root == cid.MustParse("bafy2bzacebl4w5ptfvuw6746w7ev562idkbf5ppq72e6zub22435ws2rukzru") || root == cid.MustParse("bafy2bzacebl4w5ptfvuw6746w7ev562idkbf5ppq72e6zub22435ws2rukzru") {
root == cid.MustParse("bafy2bzacea4firkyvt2zzdwqjrws5pyeluaesh6uaid246tommayr4337xpmi") {
continue continue
} }
bundles = append(bundles, &BuiltinActorsMetadata{ bundles = append(bundles, &BuiltinActorsMetadata{


@ -95,10 +95,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"verifiedregistry": MustParseCid("bafk2bzaceb37hxeuoo5rgf6ansrdl2ykm5v5zp6kireubn4orcopr67jbxv6k"), "verifiedregistry": MustParseCid("bafk2bzaceb37hxeuoo5rgf6ansrdl2ykm5v5zp6kireubn4orcopr67jbxv6k"),
}, },
}, { }, {
Network: "butterflynet", Network: "butterflynet",
Version: 12, Version: 12,
BundleGitTag: "v12.0.0-rc.3",
ManifestCid: MustParseCid("bafy2bzacectxvbk77ntedhztd6sszp2btrtvsmy7lp2ypnrk6yl74zb34t2cq"), ManifestCid: MustParseCid("bafy2bzacectxvbk77ntedhztd6sszp2btrtvsmy7lp2ypnrk6yl74zb34t2cq"),
Actors: map[string]cid.Cid{ Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzacebp7anjdtg2sohyt6lromx4xs7nujtwdfcsffnptphaayabx7ysxs"), "account": MustParseCid("bafk2bzacebp7anjdtg2sohyt6lromx4xs7nujtwdfcsffnptphaayabx7ysxs"),
"cron": MustParseCid("bafk2bzacecu2y3awtemmglpkroiglulc2fj3gpdn6eazdqr6avcautiaighrg"), "cron": MustParseCid("bafk2bzacecu2y3awtemmglpkroiglulc2fj3gpdn6eazdqr6avcautiaighrg"),
@ -117,29 +117,6 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"system": MustParseCid("bafk2bzacec3vwj2chzaram3iqupkbfiein5h2l5qiltlrngbju2vg5umelclm"), "system": MustParseCid("bafk2bzacec3vwj2chzaram3iqupkbfiein5h2l5qiltlrngbju2vg5umelclm"),
"verifiedregistry": MustParseCid("bafk2bzacedv2irkql7nil3w5v3ohqq3e54w62pxeoppjmaktzokolaaoh5ksu"), "verifiedregistry": MustParseCid("bafk2bzacedv2irkql7nil3w5v3ohqq3e54w62pxeoppjmaktzokolaaoh5ksu"),
}, },
}, {
Network: "butterflynet",
Version: 13,
BundleGitTag: "v13.0.0",
ManifestCid: MustParseCid("bafy2bzacec75zk7ufzwx6tg5avls5fxdjx5asaqmd2bfqdvkqrkzoxgyflosu"),
Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzacedl533kwbzouqxibejpwp6syfdekvmzy4vmmno6j4iaydbdmv4xek"),
"cron": MustParseCid("bafk2bzacecimv5xnuwyoqgxk26qt4xqpgntleret475pnh35s3vvhqtdct4ow"),
"datacap": MustParseCid("bafk2bzacebpdd4ctavhs7wkcykfahpifct3p4hbptgtf4jfrqcp2trtlygvow"),
"eam": MustParseCid("bafk2bzaceahw5rrgj7prgbnmn237di7ymjz2ssea32wr525jydpfrwpuhs67m"),
"ethaccount": MustParseCid("bafk2bzacebrslcbew5mq3le2zsn36xqxd4gt5hryeoslxnuqwgw3rhuwh6ygu"),
"evm": MustParseCid("bafk2bzaced5smz4lhpem4mbr7igcskv3e5qopbdp7dqshww2qs4ahacgzjzo4"),
"init": MustParseCid("bafk2bzacedgj6hawhdw2ot2ufisci374o2bq6bfkvlvdt6q7s3uoe5ffyv43k"),
"multisig": MustParseCid("bafk2bzacectnnnpwyqiccaymy3h6ghu74ghjrqyhtqv5odfd4opivzebjj6to"),
"paymentchannel": MustParseCid("bafk2bzaceckhx44jawhzhkz6k23gfnv2gcutgb4j4ekhonj2plwaent4b2tpk"),
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
"reward": MustParseCid("bafk2bzacebbs3rlg7y3wbvxrj4wgbsqmasw4ksbbr3lyqbkaxj2t25qz6zzuy"),
"storagemarket": MustParseCid("bafk2bzaced3zmxsmlhp2nsiwkxcp2ugonbsebcd53t7htzo2jcoidvu464xmm"),
"storageminer": MustParseCid("bafk2bzacebedx7iaa2ruspxvghkg46ez7un5b7oiijjtnvddq2aot5wk7p7ry"),
"storagepower": MustParseCid("bafk2bzacebvne7m2l3hxxw4xa6oujol75x35yqpnlqiwx74jilyrop4cs7cse"),
"system": MustParseCid("bafk2bzaceacjmlxrvydlud77ilpzbscez46yedx6zjsj6olxsdeuv6d4x4cwe"),
"verifiedregistry": MustParseCid("bafk2bzacebs5muoq7ft2wgqojhjio7a4vltbyprqkmlr43ojlzbil4nwvj3jg"),
},
}, { }, {
Network: "calibrationnet", Network: "calibrationnet",
Version: 8, Version: 8,
@ -224,10 +201,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"verifiedregistry": MustParseCid("bafk2bzaceceoo5jlom2zweh7kpye2vkj33wgqnkjshlsw2neemqkfg5g2rmvg"), "verifiedregistry": MustParseCid("bafk2bzaceceoo5jlom2zweh7kpye2vkj33wgqnkjshlsw2neemqkfg5g2rmvg"),
}, },
}, { }, {
Network: "calibrationnet", Network: "calibrationnet",
Version: 12, Version: 12,
BundleGitTag: "v12.0.0-rc.3",
ManifestCid: MustParseCid("bafy2bzacednzb3pkrfnbfhmoqtb3bc6dgvxszpqklf3qcc7qzcage4ewzxsca"), ManifestCid: MustParseCid("bafy2bzacednzb3pkrfnbfhmoqtb3bc6dgvxszpqklf3qcc7qzcage4ewzxsca"),
Actors: map[string]cid.Cid{ Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzacechwwxdqvggkdylm37zldjsra2ivkdzwp7fee56bzxbzs544wv6u6"), "account": MustParseCid("bafk2bzacechwwxdqvggkdylm37zldjsra2ivkdzwp7fee56bzxbzs544wv6u6"),
"cron": MustParseCid("bafk2bzacec4gdxxkqwxqqodsv6ug5dmdbqdfqwyqfek3yhxc2wweh5psxaeq6"), "cron": MustParseCid("bafk2bzacec4gdxxkqwxqqodsv6ug5dmdbqdfqwyqfek3yhxc2wweh5psxaeq6"),
@ -246,29 +223,6 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"system": MustParseCid("bafk2bzacecioupndtcnyw6iq2hbrxag3aufvczlv5nobnfbkbywqzcyfaa376"), "system": MustParseCid("bafk2bzacecioupndtcnyw6iq2hbrxag3aufvczlv5nobnfbkbywqzcyfaa376"),
"verifiedregistry": MustParseCid("bafk2bzaceavldupmf7bimeeacs67z5xdfdlfca6p7sn6bev3mt5ggepfqvhqo"), "verifiedregistry": MustParseCid("bafk2bzaceavldupmf7bimeeacs67z5xdfdlfca6p7sn6bev3mt5ggepfqvhqo"),
}, },
}, {
Network: "calibrationnet",
Version: 13,
BundleGitTag: "v13.0.0",
ManifestCid: MustParseCid("bafy2bzacect4ktyujrwp6mjlsitnpvuw2pbuppz6w52sfljyo4agjevzm75qs"),
Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzaceb3j36ri5y5mfklgp5emlvrms6g4733ss2j3l7jismrxq6ng3tcc6"),
"cron": MustParseCid("bafk2bzaceaz6rocamdxehgpwcbku6wlapwpgzyyvkrploj66mlqptsulf52bs"),
"datacap": MustParseCid("bafk2bzacea22nv5g3yngpxvonqfj4r2nkfk64y6yw2malicm7odk77x7zuads"),
"eam": MustParseCid("bafk2bzaceatqtjzj7623i426noaslouvluhz6e3md3vvquqzku5qj3532uaxg"),
"ethaccount": MustParseCid("bafk2bzacean3hs7ga5csw6g3uu7watxfnqv5uvxviebn3ba6vg4sagwdur5pu"),
"evm": MustParseCid("bafk2bzacec5ibmbtzuzjgwjmksm2n6zfq3gkicxqywwu7tsscqgdzajpfctxk"),
"init": MustParseCid("bafk2bzaced5sq72oemz6qwi6yssxwlos2g54zfprslrx5qfhhx2vlgsbvdpcs"),
"multisig": MustParseCid("bafk2bzacedbgei6jkx36fwdgvoohce4aghvpohqdhoco7p4thszgssms7olv2"),
"paymentchannel": MustParseCid("bafk2bzaceasmgmfsi4mjanxlowsub65fmevhzky4toeqbtw4kp6tmu4kxjpgq"),
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
"reward": MustParseCid("bafk2bzacedjyp6ll5ez27dfgldjj4tntxfvyp4pa5zkk7s5uhipzqjyx2gmuc"),
"storagemarket": MustParseCid("bafk2bzaceabolct6qdnefwcrtati2us3sxtxfghyqk6aamfhl6byyefmtssqi"),
"storageminer": MustParseCid("bafk2bzaceckzw3v7wqliyggvjvihz4wywchnnsie4frfvkm3fm5znb64mofri"),
"storagepower": MustParseCid("bafk2bzacea7t4wynzjajl442mpdqbnh3wusjusqtnzgpvefvweh4n2tgzgqhu"),
"system": MustParseCid("bafk2bzacedjnrb5glewazsxpcx6rwiuhl4kwrfcqolyprn6rrjtlzmthlhdq6"),
"verifiedregistry": MustParseCid("bafk2bzacebj2zdquagzy2xxn7up574oemg3w7ed3fe4aujkyhgdwj57voesn2"),
},
}, { }, {
Network: "caterpillarnet", Network: "caterpillarnet",
Version: 8, Version: 8,
@ -362,10 +316,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"verifiedregistry": MustParseCid("bafk2bzacedaws3or3twy45ltcxucgvqijsje4x675ph6vup2w35smlfneamno"), "verifiedregistry": MustParseCid("bafk2bzacedaws3or3twy45ltcxucgvqijsje4x675ph6vup2w35smlfneamno"),
}, },
}, { }, {
Network: "caterpillarnet", Network: "caterpillarnet",
Version: 12, Version: 12,
BundleGitTag: "v12.0.0-rc.3",
ManifestCid: MustParseCid("bafy2bzacebxiub6qsy67asvl5cx33x5vjbuqinalmf3xtnbmokxmmklzdkvei"), ManifestCid: MustParseCid("bafy2bzacebxiub6qsy67asvl5cx33x5vjbuqinalmf3xtnbmokxmmklzdkvei"),
Actors: map[string]cid.Cid{ Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzacecereuhejfvodut5357cai4lmhsyr7uenhcxvmw6jpmhe6auuly32"), "account": MustParseCid("bafk2bzacecereuhejfvodut5357cai4lmhsyr7uenhcxvmw6jpmhe6auuly32"),
"cron": MustParseCid("bafk2bzacebo2whgy6jla4jsf5j4ovlqm2e4eepedlpw5wadas33yxmunis4b4"), "cron": MustParseCid("bafk2bzacebo2whgy6jla4jsf5j4ovlqm2e4eepedlpw5wadas33yxmunis4b4"),
@ -384,29 +338,6 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"system": MustParseCid("bafk2bzacedye5j5uxox7knb6zlnhseaadztyav76mjbyk5qslhhbpiy5cdtt2"), "system": MustParseCid("bafk2bzacedye5j5uxox7knb6zlnhseaadztyav76mjbyk5qslhhbpiy5cdtt2"),
"verifiedregistry": MustParseCid("bafk2bzacecduww5pirr7dvaijjijw4gf6ygf7vipgxh4scvv6vseo46gueb46"), "verifiedregistry": MustParseCid("bafk2bzacecduww5pirr7dvaijjijw4gf6ygf7vipgxh4scvv6vseo46gueb46"),
}, },
}, {
Network: "caterpillarnet",
Version: 13,
BundleGitTag: "v13.0.0",
ManifestCid: MustParseCid("bafy2bzacedu7kk2zngxp7y3lynhtaht6vgadgn5jzkxe5nuowtwzasnogx63w"),
Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzacecro3uo6ypqhfzwdhnamzcole5qmhrbkx7qny6t2qsrcpqxelt6s2"),
"cron": MustParseCid("bafk2bzaceam3kci46y4siltbw7f4itoap34kp7b7pvn2fco5s2bvnotomwdbe"),
"datacap": MustParseCid("bafk2bzacecmtdspcbqmmjtsaz4vucuqoqjqfsgxjonns7tom7eblkngbcm7bw"),
"eam": MustParseCid("bafk2bzaceaudqhrt7djewopqdnryvwxagfufyt7ja4gdvovrxbh6edh6evgrw"),
"ethaccount": MustParseCid("bafk2bzaced676ds3z6xe333wr7frwq3f2iq5kjwp4okl3te6rne3xf7kuqrwm"),
"evm": MustParseCid("bafk2bzacebeih4jt2s6mel6x4hje7xmnugh6twul2a5axx4iczu7fu4wcdi6k"),
"init": MustParseCid("bafk2bzaceba7vvuzzwj5wqnq2bvpbgtxup53mhr3qybezbllftnxvpqbfymxo"),
"multisig": MustParseCid("bafk2bzaceapkajhnqoczrgry5javqbl7uebgmsbpqqfemzc4yb5q2dqia2qog"),
"paymentchannel": MustParseCid("bafk2bzacebg7xq4ca22gafmdbkcq357x7v6slflib4h3fnj4amsovg6ulqg3o"),
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
"reward": MustParseCid("bafk2bzaceajt4idf26ffnyipybcib55fykjxnek7oszkqzi7lu7mbgijmkgos"),
"storagemarket": MustParseCid("bafk2bzaceadfmay7pyl7osjsdmrireafasnjnoziacljy5ewrcsxpp56kzqbw"),
"storageminer": MustParseCid("bafk2bzaceardbn5a7aq5jxl7efr4btmsbl7txnxm4hrrd3llyhujuc2cr5vcs"),
"storagepower": MustParseCid("bafk2bzacear4563jznjqyseoy42xl6kenyqk6umv6xl3bp5bsjb3hbs6sp6bm"),
"system": MustParseCid("bafk2bzacecc5oavxivfnvirx2g7megpdf6lugooyoc2wijloju247xzjcdezy"),
"verifiedregistry": MustParseCid("bafk2bzacebnkdt42mpf5emypo6iroux3hszfh5yt54v2mmnnura3ketholly4"),
},
}, { }, {
Network: "devnet", Network: "devnet",
Version: 8, Version: 8,
@ -491,10 +422,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"verifiedregistry": MustParseCid("bafk2bzacebdqi5tr5pjnem5nylg2zbqcugvi7oxi35bhnrfudx4y4ufhlit2k"), "verifiedregistry": MustParseCid("bafk2bzacebdqi5tr5pjnem5nylg2zbqcugvi7oxi35bhnrfudx4y4ufhlit2k"),
}, },
}, { }, {
Network: "devnet", Network: "devnet",
Version: 12, Version: 12,
BundleGitTag: "v12.0.0-rc.3",
ManifestCid: MustParseCid("bafy2bzaceasjdukhhyjbegpli247vbf5h64f7uvxhhebdihuqsj2mwisdwa6o"), ManifestCid: MustParseCid("bafy2bzaceasjdukhhyjbegpli247vbf5h64f7uvxhhebdihuqsj2mwisdwa6o"),
Actors: map[string]cid.Cid{ Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzacedki4apynvdxxuoigmqkgaktgy2erjftoxqxqaklnelgveyaqknfu"), "account": MustParseCid("bafk2bzacedki4apynvdxxuoigmqkgaktgy2erjftoxqxqaklnelgveyaqknfu"),
"cron": MustParseCid("bafk2bzacebjpczf7qtcisy3zdp3sqoohxe75tgupmdo5dr26vh7orzrsjn3b2"), "cron": MustParseCid("bafk2bzacebjpczf7qtcisy3zdp3sqoohxe75tgupmdo5dr26vh7orzrsjn3b2"),
@ -513,29 +444,6 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"system": MustParseCid("bafk2bzacecnau5wddulbsvwn75tc3w75jrlvkybgrlxs4ngonqab6xq3eowvg"), "system": MustParseCid("bafk2bzacecnau5wddulbsvwn75tc3w75jrlvkybgrlxs4ngonqab6xq3eowvg"),
"verifiedregistry": MustParseCid("bafk2bzacec37mddea65nvh4htsagtryfa3sq6i67utcupslyhzbhjhoy6hopa"), "verifiedregistry": MustParseCid("bafk2bzacec37mddea65nvh4htsagtryfa3sq6i67utcupslyhzbhjhoy6hopa"),
}, },
}, {
Network: "devnet",
Version: 13,
BundleGitTag: "v13.0.0",
ManifestCid: MustParseCid("bafy2bzacecn7uxgehrqbcs462ktl2h23u23cmduy2etqj6xrd6tkkja56fna4"),
Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzacebev3fu5geeehpx577b3kvza4xsmmggmepjj7rlsnr27hpoq27q2i"),
"cron": MustParseCid("bafk2bzacedalzqahtuz2bmnf7uawbcujfhhe5xzv5ys5ufadu6ggs3tcu6lsy"),
"datacap": MustParseCid("bafk2bzaceb7ou2vn7ac4xidespoowq2q5w7ognr7s4ujy3xzzgiishajpe7le"),
"eam": MustParseCid("bafk2bzacedqic2qskattorj4svf6mbto2k76ej3ll3ugsyorqramrg7rpq3by"),
"ethaccount": MustParseCid("bafk2bzaceaoad7iknpywijigv2h3jyvkijff2oxvohzue533v5hby3iix5vdu"),
"evm": MustParseCid("bafk2bzacecjgiw26gagsn6a7tffkrgoor4zfgzfokp76u6cwervtmvjbopmwg"),
"init": MustParseCid("bafk2bzaced2obubqojxggeddr246cpwtyzi6knnq52jsvsc2fs3tuk2kh6dtg"),
"multisig": MustParseCid("bafk2bzacebquruzb6zho45orbdkku624t6w6jt4tudaqzraz4yh3li3jfstpg"),
"paymentchannel": MustParseCid("bafk2bzaceaydrilyxvflsuzr24hmw32qwz6sy4hgls73bhpveydcsqskdgpca"),
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
"reward": MustParseCid("bafk2bzaceb74owpuzdddqoj2tson6ymbyuguqrnqefyiaxqvwm4ygitpabjrq"),
"storagemarket": MustParseCid("bafk2bzaceaw6dslv6pfqha4ynghq2imij5khnnjrie22kmfgtpie3bvxho6jq"),
"storageminer": MustParseCid("bafk2bzacecsputz6xygjfyrvx2d7bxkpp7b5v4icrmpckec7gnbabx2w377qs"),
"storagepower": MustParseCid("bafk2bzaceceyaa5yjwhxvvcqouob4l746zp5nesivr6enhtpimakdtby6kafi"),
"system": MustParseCid("bafk2bzaceaxg6k5vuozxlemfi5hv663m6jcawzu5puboo4znj73i36e3tsovs"),
"verifiedregistry": MustParseCid("bafk2bzacea2czkb4vt2iiiwdb6e57qfwqse4mk2pcyvwjmdl5ojbnla57oh2u"),
},
}, { }, {
Network: "hyperspace", Network: "hyperspace",
Version: 8, Version: 8,
@ -643,10 +551,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"verifiedregistry": MustParseCid("bafk2bzacedej3dnr62g2je2abmyjg3xqv4otvh6e26du5fcrhvw7zgcaaez3a"), "verifiedregistry": MustParseCid("bafk2bzacedej3dnr62g2je2abmyjg3xqv4otvh6e26du5fcrhvw7zgcaaez3a"),
}, },
}, { }, {
Network: "mainnet", Network: "mainnet",
Version: 12, Version: 12,
BundleGitTag: "v12.0.0-rc.3",
ManifestCid: MustParseCid("bafy2bzaceapkgfggvxyllnmuogtwasmsv5qi2qzhc2aybockd6kag2g5lzaio"), ManifestCid: MustParseCid("bafy2bzaceapkgfggvxyllnmuogtwasmsv5qi2qzhc2aybockd6kag2g5lzaio"),
Actors: map[string]cid.Cid{ Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzaceboftg75mdiba7xbo2i3uvgtca4brhnr3u5ptihonixgpnrvhpxoa"), "account": MustParseCid("bafk2bzaceboftg75mdiba7xbo2i3uvgtca4brhnr3u5ptihonixgpnrvhpxoa"),
"cron": MustParseCid("bafk2bzacechxjkfe2cehx4s7skj3wzfpzf7zolds64khrrrs66bhazsemktls"), "cron": MustParseCid("bafk2bzacechxjkfe2cehx4s7skj3wzfpzf7zolds64khrrrs66bhazsemktls"),
@ -665,29 +573,6 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"system": MustParseCid("bafk2bzacebfqrja2hip7esf4eafxjmu6xcogoqu5xxtgdg7xa5szgvvdguchu"), "system": MustParseCid("bafk2bzacebfqrja2hip7esf4eafxjmu6xcogoqu5xxtgdg7xa5szgvvdguchu"),
"verifiedregistry": MustParseCid("bafk2bzacedudgflxc75c77c6zkmfyq4u2xuk7k6xw6dfdccarjrvxx453b77q"), "verifiedregistry": MustParseCid("bafk2bzacedudgflxc75c77c6zkmfyq4u2xuk7k6xw6dfdccarjrvxx453b77q"),
}, },
}, {
Network: "mainnet",
Version: 13,
BundleGitTag: "v13.0.0",
ManifestCid: MustParseCid("bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e"),
Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzacedxnbtlsqdk76fsfmnhyvsblwyfducerwwtp3mqtx2wbrvs5idl52"),
"cron": MustParseCid("bafk2bzacebbopddyn5csb3fsuhh2an4ttd23x6qnwixgohlirj5ahtcudphyc"),
"datacap": MustParseCid("bafk2bzaceah42tfnhd7xnztawgf46gbvc3m2gudoxshlba2ucmmo2vy67t7ci"),
"eam": MustParseCid("bafk2bzaceb23bhvvcjsth7cn7vp3gbaphrutsaz7v6hkls3ogotzs4bnhm4mk"),
"ethaccount": MustParseCid("bafk2bzaceautge6zhuy6jbj3uldwoxwhpywuon6z3xfvmdbzpbdribc6zzmei"),
"evm": MustParseCid("bafk2bzacedq6v2lyuhgywhlllwmudfj2zufzcauxcsvvd34m2ek5xr55mvh2q"),
"init": MustParseCid("bafk2bzacedr4xacm3fts4vilyeiacjr2hpmwzclyzulbdo24lrfxbtau2wbai"),
"multisig": MustParseCid("bafk2bzacecr5zqarfqak42xqcfeulsxlavcltawsx2fvc7zsjtby6ti4b3wqc"),
"paymentchannel": MustParseCid("bafk2bzacebntdhfmyc24e7tm52ggx5tnw4i3hrr3jmllsepv3mibez4hywsa2"),
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
"reward": MustParseCid("bafk2bzacedq4q2kwkruu4xm7rkyygumlbw2yt4nimna2ivea4qarvtkohnuwu"),
"storagemarket": MustParseCid("bafk2bzacebjtoltdviyznpj34hh5qp6u257jnnbjole5rhqfixm7ug3epvrfu"),
"storageminer": MustParseCid("bafk2bzacebf4rrqyk7gcfggggul6nfpzay7f2ordnkwm7z2wcf4mq6r7i77t2"),
"storagepower": MustParseCid("bafk2bzacecjy4dkulvxppg3ocbmeixe2wgg6yxoyjxrm4ko2fm3uhpvfvam6e"),
"system": MustParseCid("bafk2bzacecyf523quuq2kdjfdvyty446z2ounmamtgtgeqnr3ynlu5cqrlt6e"),
"verifiedregistry": MustParseCid("bafk2bzacedkxehp7y7iyukbcje3wbpqcvufisos6exatkanyrbotoecdkrbta"),
},
}, { }, {
Network: "testing", Network: "testing",
Version: 8, Version: 8,
@ -772,10 +657,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"), "verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"),
}, },
}, { }, {
Network: "testing", Network: "testing",
Version: 12, Version: 12,
BundleGitTag: "v12.0.0-rc.3",
ManifestCid: MustParseCid("bafy2bzaceaaxd6ytavsek5bi5soqo7qamezuqfyfjy42es2clpbzu3pwzcmye"), ManifestCid: MustParseCid("bafy2bzaceaaxd6ytavsek5bi5soqo7qamezuqfyfjy42es2clpbzu3pwzcmye"),
Actors: map[string]cid.Cid{ Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzacea74qqkfvacykmq5emzqblh4f4nmxdkiyixxpzs7kkcfnbfa7cb6m"), "account": MustParseCid("bafk2bzacea74qqkfvacykmq5emzqblh4f4nmxdkiyixxpzs7kkcfnbfa7cb6m"),
"cron": MustParseCid("bafk2bzacecotbu7k6awdzfzakf7g5iaas6gswtunjnnb2xm2klqoshjgb4imy"), "cron": MustParseCid("bafk2bzacecotbu7k6awdzfzakf7g5iaas6gswtunjnnb2xm2klqoshjgb4imy"),
@ -794,29 +679,6 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"), "system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"),
"verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"), "verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"),
}, },
}, {
Network: "testing",
Version: 13,
BundleGitTag: "v13.0.0",
ManifestCid: MustParseCid("bafy2bzacedg47dqxmtgzjch6i42kth72esd7w23gujyd6c6oppg3n6auag5ou"),
Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzaceb3tncntgeqvzzr5fzhvpsc5ntv3tpqrsh4jst4irfyzpkdyigibc"),
"cron": MustParseCid("bafk2bzacecwwasmhixpgtshczm5cfspwciyawc25mrefknqhlxfrd6m57tqmc"),
"datacap": MustParseCid("bafk2bzaceckj66by6eohjrybazh5cymmovgl5bmikpvzki2q7huwk2fweoef2"),
"eam": MustParseCid("bafk2bzaceafzm65wvnaam3775homn4vzsv7odftn5tkifmn44wd2t6gupy63y"),
"ethaccount": MustParseCid("bafk2bzaced4q7m4mha2dsezhwub3ru64rgimkg52t25ul4gnekax6uq7hbkqu"),
"evm": MustParseCid("bafk2bzaceakpknw5cuizil3552jr5z35rs6ijaignjigciswtok67drhzdss6"),
"init": MustParseCid("bafk2bzacec7mbkemwugyg2p4oy2xgnovykk4dnsu5ym4wkreooujvxfsxbo3i"),
"multisig": MustParseCid("bafk2bzacebmftoql6dcyqf54xznwjg2bfgdsi67spqquwslpvvtvcx6qenhz2"),
"paymentchannel": MustParseCid("bafk2bzaceau57wpiiikea6pu5om4ryyqjrxjzfksfl4reqosnohydzv3pf4qq"),
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
"reward": MustParseCid("bafk2bzacecvlcdgbqlk3dyfzkcjrywg2th5bmn7ilijifikulpxr4ffcrw23o"),
"storagemarket": MustParseCid("bafk2bzacecgj53dwqla7eiubs2uiza7cgxkxtefxkfpjontj5jxefl3a4i2nq"),
"storageminer": MustParseCid("bafk2bzaceailclue4dba2edjethfjw6ycufcwsx4qjjmgsh77xcyprmogdjvu"),
"storagepower": MustParseCid("bafk2bzaceaqw6dhdjlqovhk3p4lb4sb25i5d6mhln2ir5m7tj6m4fegkgkinw"),
"system": MustParseCid("bafk2bzaceby6aiiosnrtb5kzlmrvd4k3o27oo3idmbd6llydz2uqibbp23pzq"),
"verifiedregistry": MustParseCid("bafk2bzaceadw6mxuyb6ipaq3fhekk7u5osyyiis3c4wbkvysy2ut6qfsua5zs"),
},
}, { }, {
Network: "testing-fake-proofs", Network: "testing-fake-proofs",
Version: 8, Version: 8,
@ -901,10 +763,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"), "verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"),
}, },
}, { }, {
Network: "testing-fake-proofs", Network: "testing-fake-proofs",
Version: 12, Version: 12,
BundleGitTag: "v12.0.0-rc.3",
ManifestCid: MustParseCid("bafy2bzacecver4l5d6jiuzubhrtcxjjfdx6jnxbmyp4bselol2atgkhz3e3um"), ManifestCid: MustParseCid("bafy2bzacecver4l5d6jiuzubhrtcxjjfdx6jnxbmyp4bselol2atgkhz3e3um"),
Actors: map[string]cid.Cid{ Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzacea74qqkfvacykmq5emzqblh4f4nmxdkiyixxpzs7kkcfnbfa7cb6m"), "account": MustParseCid("bafk2bzacea74qqkfvacykmq5emzqblh4f4nmxdkiyixxpzs7kkcfnbfa7cb6m"),
"cron": MustParseCid("bafk2bzacecotbu7k6awdzfzakf7g5iaas6gswtunjnnb2xm2klqoshjgb4imy"), "cron": MustParseCid("bafk2bzacecotbu7k6awdzfzakf7g5iaas6gswtunjnnb2xm2klqoshjgb4imy"),
@ -923,27 +785,4 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
"system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"), "system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"),
"verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"), "verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"),
}, },
}, {
Network: "testing-fake-proofs",
Version: 13,
BundleGitTag: "v13.0.0",
ManifestCid: MustParseCid("bafy2bzaceaf7fz33sp2i5ag5xg5ompn3dwppqlbwfacrwuvzaqdbqrtni7m5q"),
Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzaceb3tncntgeqvzzr5fzhvpsc5ntv3tpqrsh4jst4irfyzpkdyigibc"),
"cron": MustParseCid("bafk2bzacecwwasmhixpgtshczm5cfspwciyawc25mrefknqhlxfrd6m57tqmc"),
"datacap": MustParseCid("bafk2bzaceckj66by6eohjrybazh5cymmovgl5bmikpvzki2q7huwk2fweoef2"),
"eam": MustParseCid("bafk2bzaceafzm65wvnaam3775homn4vzsv7odftn5tkifmn44wd2t6gupy63y"),
"ethaccount": MustParseCid("bafk2bzaced4q7m4mha2dsezhwub3ru64rgimkg52t25ul4gnekax6uq7hbkqu"),
"evm": MustParseCid("bafk2bzaceakpknw5cuizil3552jr5z35rs6ijaignjigciswtok67drhzdss6"),
"init": MustParseCid("bafk2bzacec7mbkemwugyg2p4oy2xgnovykk4dnsu5ym4wkreooujvxfsxbo3i"),
"multisig": MustParseCid("bafk2bzacedy4vldq4viv6bzzh4fueip3by3axsbgbh655lashddgumknc6pvs"),
"paymentchannel": MustParseCid("bafk2bzaceau57wpiiikea6pu5om4ryyqjrxjzfksfl4reqosnohydzv3pf4qq"),
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
"reward": MustParseCid("bafk2bzacecvlcdgbqlk3dyfzkcjrywg2th5bmn7ilijifikulpxr4ffcrw23o"),
"storagemarket": MustParseCid("bafk2bzacecgj53dwqla7eiubs2uiza7cgxkxtefxkfpjontj5jxefl3a4i2nq"),
"storageminer": MustParseCid("bafk2bzaceb6atn3k6yhmskgmc3lgfiwpzpfmaxzacohtnb2hivme2oroycqr6"),
"storagepower": MustParseCid("bafk2bzacedameh56mp2g4y7nprhax5sddbzcmpk5p7l523l45rtn2wjc6ah4e"),
"system": MustParseCid("bafk2bzaceby6aiiosnrtb5kzlmrvd4k3o27oo3idmbd6llydz2uqibbp23pzq"),
"verifiedregistry": MustParseCid("bafk2bzaceadw6mxuyb6ipaq3fhekk7u5osyyiis3c4wbkvysy2ut6qfsua5zs"),
},
}} }}
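The hunks above add or remove whole BuiltinActorsMetadata entries, one per network and actors version, each carrying a BundleGitTag, a ManifestCid, and a name-to-CID map. A table like this is typically consulted by matching on network and version and then indexing the Actors map; a simplified sketch with string stand-ins for cid.Cid:

```go
package main

import "fmt"

// BuiltinActorsMetadata is a trimmed stand-in for the generated entries above.
type BuiltinActorsMetadata struct {
	Network     string
	Version     uint
	ManifestCid string            // cid.Cid in the real code
	Actors      map[string]string // actor name -> code CID
}

var metadata = []*BuiltinActorsMetadata{
	{
		Network:     "calibrationnet",
		Version:     12,
		ManifestCid: "bafy...manifest", // placeholder value for the sketch
		Actors:      map[string]string{"storageminer": "bafk...miner"},
	},
}

// actorCode finds the entry for a (network, version) pair and returns the code
// CID registered for the named actor, if any.
func actorCode(network string, version uint, name string) (string, bool) {
	for _, m := range metadata {
		if m.Network == network && m.Version == version {
			code, ok := m.Actors[name]
			return code, ok
		}
	}
	return "", false
}

func main() {
	code, ok := actorCode("calibrationnet", 12, "storageminer")
	fmt.Println(code, ok)
}
```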


@ -10,8 +10,8 @@ type DrandEnum int
func DrandConfigSchedule() dtypes.DrandSchedule { func DrandConfigSchedule() dtypes.DrandSchedule {
out := dtypes.DrandSchedule{} out := dtypes.DrandSchedule{}
for start, network := range DrandSchedule { for start, config := range DrandSchedule {
out = append(out, dtypes.DrandPoint{Start: start, Config: DrandConfigs[network]}) out = append(out, dtypes.DrandPoint{Start: start, Config: DrandConfigs[config]})
} }
sort.Slice(out, func(i, j int) bool { sort.Slice(out, func(i, j int) bool {
@ -27,7 +27,6 @@ const (
DrandDevnet DrandDevnet
DrandLocalnet DrandLocalnet
DrandIncentinet DrandIncentinet
DrandQuicknet
) )
var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
@ -37,32 +36,14 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
"https://api2.drand.sh", "https://api2.drand.sh",
"https://api3.drand.sh", "https://api3.drand.sh",
"https://drand.cloudflare.com", "https://drand.cloudflare.com",
"https://api.drand.secureweb3.com:6875", // Storswift
}, },
Relays: []string{ Relays: []string{
"/dnsaddr/api.drand.sh/", "/dnsaddr/api.drand.sh/",
"/dnsaddr/api2.drand.sh/", "/dnsaddr/api2.drand.sh/",
"/dnsaddr/api3.drand.sh/", "/dnsaddr/api3.drand.sh/",
}, },
IsChained: true,
ChainInfoJSON: `{"public_key":"868f005eb8e6e4ca0a47c8a77ceaa5309a47978a7c71bc5cce96366b5d7a569937c529eeda66c7293784a9402801af31","period":30,"genesis_time":1595431050,"hash":"8990e7a9aaed2ffed73dbd7092123d6f289930540d7651336225dc172e51b2ce","groupHash":"176f93498eac9ca337150b46d21dd58673ea4e3581185f869672e59fa4cb390a"}`, ChainInfoJSON: `{"public_key":"868f005eb8e6e4ca0a47c8a77ceaa5309a47978a7c71bc5cce96366b5d7a569937c529eeda66c7293784a9402801af31","period":30,"genesis_time":1595431050,"hash":"8990e7a9aaed2ffed73dbd7092123d6f289930540d7651336225dc172e51b2ce","groupHash":"176f93498eac9ca337150b46d21dd58673ea4e3581185f869672e59fa4cb390a"}`,
}, },
DrandQuicknet: {
Servers: []string{
"https://api.drand.sh",
"https://api2.drand.sh",
"https://api3.drand.sh",
"https://drand.cloudflare.com",
"https://api.drand.secureweb3.com:6875", // Storswift
},
Relays: []string{
"/dnsaddr/api.drand.sh/",
"/dnsaddr/api2.drand.sh/",
"/dnsaddr/api3.drand.sh/",
},
IsChained: false,
ChainInfoJSON: `{"public_key":"83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a","period":3,"genesis_time":1692803367,"hash":"52db9ba70e0cc0f6eaf7803dd07447a1f5477735fd3f661792ba94600c84e971","groupHash":"f477d5c89f21a17c863a7f937c6a6d15859414d2be09cd448d4279af331c5d3e","schemeID":"bls-unchained-g1-rfc9380","metadata":{"beaconID":"quicknet"}}`,
},
DrandTestnet: { DrandTestnet: {
Servers: []string{ Servers: []string{
"https://pl-eu.testnet.drand.sh", "https://pl-eu.testnet.drand.sh",
@ -74,7 +55,6 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
"/dnsaddr/pl-us.testnet.drand.sh/", "/dnsaddr/pl-us.testnet.drand.sh/",
"/dnsaddr/pl-sin.testnet.drand.sh/", "/dnsaddr/pl-sin.testnet.drand.sh/",
}, },
IsChained: true,
ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`, ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`,
}, },
DrandDevnet: { DrandDevnet: {
@ -86,11 +66,9 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
"/dnsaddr/dev1.drand.sh/", "/dnsaddr/dev1.drand.sh/",
"/dnsaddr/dev2.drand.sh/", "/dnsaddr/dev2.drand.sh/",
}, },
IsChained: true,
ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`, ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`,
}, },
DrandIncentinet: { DrandIncentinet: {
IsChained: true,
ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`, ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`,
}, },
} }
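The DrandConfigSchedule hunk near the top of this file turns the DrandSchedule map (start epoch to config key) into a slice of points sorted by start epoch; consumers can then pick the entry whose Start is the latest one at or below a given epoch. A simplified, self-contained sketch of that build-and-lookup flow (local stand-in types, not the dtypes definitions):

```go
package main

import (
	"fmt"
	"sort"
)

type ChainEpoch int64

// DrandPoint pairs a start epoch with the config active from that epoch onward.
type DrandPoint struct {
	Start  ChainEpoch
	Config string // stand-in for the full dtypes.DrandConfig
}

// buildSchedule flattens the map into a slice sorted by Start, as the hunk does.
func buildSchedule(schedule map[ChainEpoch]string) []DrandPoint {
	out := make([]DrandPoint, 0, len(schedule))
	for start, cfg := range schedule {
		out = append(out, DrandPoint{Start: start, Config: cfg})
	}
	sort.Slice(out, func(i, j int) bool { return out[i].Start < out[j].Start })
	return out
}

// configAt returns the config of the latest point whose Start is <= epoch.
func configAt(points []DrandPoint, epoch ChainEpoch) string {
	cfg := points[0].Config
	for _, p := range points {
		if p.Start > epoch {
			break
		}
		cfg = p.Config
	}
	return cfg
}

func main() {
	points := buildSchedule(map[ChainEpoch]string{0: "mainnet", 1000: "quicknet"})
	fmt.Println(configAt(points, 500))  // mainnet
	fmt.Println(configAt(points, 2000)) // quicknet
}
```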


@ -5,8 +5,5 @@ import (
) )
func IsNearUpgrade(epoch, upgradeEpoch abi.ChainEpoch) bool { func IsNearUpgrade(epoch, upgradeEpoch abi.ChainEpoch) bool {
if upgradeEpoch < 0 {
return false
}
return epoch > upgradeEpoch-Finality && epoch < upgradeEpoch+Finality return epoch > upgradeEpoch-Finality && epoch < upgradeEpoch+Finality
} }
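One side of the IsNearUpgrade hunk above short-circuits for negative upgrade epochs, which the params files in this diff use to mark upgrades that are disabled on a given network. A small runnable sketch of that behaviour, with an assumed Finality of 900 epochs for illustration only:

```go
package main

import "fmt"

type ChainEpoch int64

// Finality is an illustrative value; the real constant comes from build params.
const Finality ChainEpoch = 900

func IsNearUpgrade(epoch, upgradeEpoch ChainEpoch) bool {
	if upgradeEpoch < 0 {
		// negative heights mark upgrades that are disabled on this network
		return false
	}
	return epoch > upgradeEpoch-Finality && epoch < upgradeEpoch+Finality
}

func main() {
	fmt.Println(IsNearUpgrade(1200, 1500)) // true: within Finality of the upgrade epoch
	fmt.Println(IsNearUpgrade(1200, -24))  // false: upgrade disabled
}
```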

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.


@ -23,7 +23,7 @@ var NetworkBundle = "devnet"
var BundleOverrides map[actorstypes.Version]string var BundleOverrides map[actorstypes.Version]string
var ActorDebugging = true var ActorDebugging = true
var GenesisNetworkVersion = network.Version21 const GenesisNetworkVersion = network.Version20
var UpgradeBreezeHeight = abi.ChainEpoch(-1) var UpgradeBreezeHeight = abi.ChainEpoch(-1)
@ -65,11 +65,7 @@ var UpgradeLightningHeight = abi.ChainEpoch(-22)
var UpgradeThunderHeight = abi.ChainEpoch(-23) var UpgradeThunderHeight = abi.ChainEpoch(-23)
var UpgradeWatermelonHeight = abi.ChainEpoch(-24) var UpgradeWatermelonHeight = abi.ChainEpoch(200)
var UpgradeDragonHeight = abi.ChainEpoch(20)
var UpgradePhoenixHeight = UpgradeDragonHeight + 120
// This fix upgrade only ran on calibrationnet // This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFixHeight = -100 const UpgradeWatermelonFixHeight = -100
@ -77,12 +73,8 @@ const UpgradeWatermelonFixHeight = -100
// This fix upgrade only ran on calibrationnet // This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFix2Height = -101 const UpgradeWatermelonFix2Height = -101
// This fix upgrade only ran on calibrationnet
const UpgradeCalibrationDragonFixHeight = -102
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet, 0: DrandMainnet,
UpgradePhoenixHeight: DrandQuicknet,
} }
var SupportedProofTypes = []abi.RegisteredSealProof{ var SupportedProofTypes = []abi.RegisteredSealProof{
@ -99,22 +91,6 @@ func init() {
policy.SetMinVerifiedDealSize(MinVerifiedDealSize) policy.SetMinVerifiedDealSize(MinVerifiedDealSize)
policy.SetPreCommitChallengeDelay(PreCommitChallengeDelay) policy.SetPreCommitChallengeDelay(PreCommitChallengeDelay)
getGenesisNetworkVersion := func(ev string, def network.Version) network.Version {
hs, found := os.LookupEnv(ev)
if found {
h, err := strconv.Atoi(hs)
if err != nil {
log.Panicf("failed to parse %s env var", ev)
}
return network.Version(h)
}
return def
}
GenesisNetworkVersion = getGenesisNetworkVersion("LOTUS_GENESIS_NETWORK_VERSION", GenesisNetworkVersion)
getUpgradeHeight := func(ev string, def abi.ChainEpoch) abi.ChainEpoch { getUpgradeHeight := func(ev string, def abi.ChainEpoch) abi.ChainEpoch {
hs, found := os.LookupEnv(ev) hs, found := os.LookupEnv(ev)
if found { if found {
@ -153,13 +129,6 @@ func init() {
UpgradeLightningHeight = getUpgradeHeight("LOTUS_LIGHTNING_HEIGHT", UpgradeLightningHeight) UpgradeLightningHeight = getUpgradeHeight("LOTUS_LIGHTNING_HEIGHT", UpgradeLightningHeight)
UpgradeThunderHeight = getUpgradeHeight("LOTUS_THUNDER_HEIGHT", UpgradeThunderHeight) UpgradeThunderHeight = getUpgradeHeight("LOTUS_THUNDER_HEIGHT", UpgradeThunderHeight)
UpgradeWatermelonHeight = getUpgradeHeight("LOTUS_WATERMELON_HEIGHT", UpgradeWatermelonHeight) UpgradeWatermelonHeight = getUpgradeHeight("LOTUS_WATERMELON_HEIGHT", UpgradeWatermelonHeight)
UpgradeDragonHeight = getUpgradeHeight("LOTUS_DRAGON_HEIGHT", UpgradeDragonHeight)
UpgradePhoenixHeight = getUpgradeHeight("LOTUS_PHOENIX_HEIGHT", UpgradePhoenixHeight)
DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
UpgradePhoenixHeight: DrandQuicknet,
}
BuildType |= Build2k BuildType |= Build2k
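The init hunks above wire upgrade heights (and, on one side, the genesis network version) to environment-variable overrides via small lookup helpers. A minimal standalone sketch of that parse-or-default pattern, including an example override (the real helper logs before panicking; this sketch just panics):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

type ChainEpoch int64

// getUpgradeHeight follows the helper shown above: an env var, when present and
// parseable, overrides the compiled-in default.
func getUpgradeHeight(ev string, def ChainEpoch) ChainEpoch {
	if hs, found := os.LookupEnv(ev); found {
		h, err := strconv.Atoi(hs)
		if err != nil {
			panic(fmt.Sprintf("failed to parse %s env var", ev))
		}
		return ChainEpoch(h)
	}
	return def
}

func main() {
	fmt.Println(getUpgradeHeight("LOTUS_WATERMELON_HEIGHT", 200)) // 200 (default)
	os.Setenv("LOTUS_WATERMELON_HEIGHT", "240")
	fmt.Println(getUpgradeHeight("LOTUS_WATERMELON_HEIGHT", 200)) // 240 (override)
}
```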


@ -16,11 +16,10 @@ import (
) )
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet, 0: DrandMainnet,
UpgradePhoenixHeight: DrandQuicknet,
} }
const GenesisNetworkVersion = network.Version21 const GenesisNetworkVersion = network.Version20
var NetworkBundle = "butterflynet" var NetworkBundle = "butterflynet"
var BundleOverrides map[actorstypes.Version]string var BundleOverrides map[actorstypes.Version]string
@ -55,11 +54,8 @@ const UpgradeSharkHeight = -20
const UpgradeHyggeHeight = -21 const UpgradeHyggeHeight = -21
const UpgradeLightningHeight = -22 const UpgradeLightningHeight = -22
const UpgradeThunderHeight = -23 const UpgradeThunderHeight = -23
const UpgradeWatermelonHeight = -24
const UpgradeDragonHeight = 5760 const UpgradeWatermelonHeight = 400
const UpgradePhoenixHeight = UpgradeDragonHeight + 120
// This fix upgrade only ran on calibrationnet // This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFixHeight = -100 const UpgradeWatermelonFixHeight = -100
@ -67,9 +63,6 @@ const UpgradeWatermelonFixHeight = -100
// This fix upgrade only ran on calibrationnet // This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFix2Height = -101 const UpgradeWatermelonFix2Height = -101
// This fix upgrade only ran on calibrationnet
const UpgradeCalibrationDragonFixHeight = -102
var SupportedProofTypes = []abi.RegisteredSealProof{ var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg512MiBV1, abi.RegisteredSealProof_StackedDrg512MiBV1,
abi.RegisteredSealProof_StackedDrg32GiBV1, abi.RegisteredSealProof_StackedDrg32GiBV1,

View File

@ -19,8 +19,7 @@ import (
) )
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet, 0: DrandMainnet,
UpgradePhoenixHeight: DrandQuicknet,
} }
const GenesisNetworkVersion = network.Version0 const GenesisNetworkVersion = network.Version0
@ -89,15 +88,6 @@ const UpgradeWatermelonFixHeight = 1070494
// 2023-11-21T13:00:00Z // 2023-11-21T13:00:00Z
const UpgradeWatermelonFix2Height = 1108174 const UpgradeWatermelonFix2Height = 1108174
// 2024-03-11T14:00:00Z
const UpgradeDragonHeight = 1427974
// This epoch, 120 epochs after the "rest" of the nv22 upgrade, is when we switch to Drand quicknet
const UpgradePhoenixHeight = UpgradeDragonHeight + 120
// 2024-04-03T11:00:00Z
const UpgradeCalibrationDragonFixHeight = 1493854
var SupportedProofTypes = []abi.RegisteredSealProof{ var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg32GiBV1, abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1, abi.RegisteredSealProof_StackedDrg64GiBV1,


@ -53,11 +53,8 @@ var UpgradeSharkHeight = abi.ChainEpoch(-20)
var UpgradeHyggeHeight = abi.ChainEpoch(-21) var UpgradeHyggeHeight = abi.ChainEpoch(-21)
var UpgradeLightningHeight = abi.ChainEpoch(-22) var UpgradeLightningHeight = abi.ChainEpoch(-22)
var UpgradeThunderHeight = abi.ChainEpoch(-23) var UpgradeThunderHeight = abi.ChainEpoch(-23)
var UpgradeWatermelonHeight = abi.ChainEpoch(-24)
const UpgradeDragonHeight = 50 const UpgradeWatermelonHeight = 50
const UpgradePhoenixHeight = UpgradeDragonHeight + 100
// This fix upgrade only ran on calibrationnet // This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFixHeight = -1 const UpgradeWatermelonFixHeight = -1
@ -65,12 +62,8 @@ const UpgradeWatermelonFixHeight = -1
// This fix upgrade only ran on calibrationnet // This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFix2Height = -2 const UpgradeWatermelonFix2Height = -2
// This fix upgrade only ran on calibrationnet
const UpgradeCalibrationDragonFixHeight = -3
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet, 0: DrandMainnet,
UpgradePhoenixHeight: DrandQuicknet,
} }
var SupportedProofTypes = []abi.RegisteredSealProof{ var SupportedProofTypes = []abi.RegisteredSealProof{


@ -16,9 +16,8 @@ import (
) )
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandIncentinet, 0: DrandIncentinet,
UpgradeSmokeHeight: DrandMainnet, UpgradeSmokeHeight: DrandMainnet,
UpgradePhoenixHeight: DrandQuicknet,
} }
var NetworkBundle = "mainnet" var NetworkBundle = "mainnet"
@ -97,14 +96,7 @@ const UpgradeLightningHeight = 2809800
const UpgradeThunderHeight = UpgradeLightningHeight + 2880*21 const UpgradeThunderHeight = UpgradeLightningHeight + 2880*21
// 2023-12-12T13:30:00Z // 2023-12-12T13:30:00Z
const UpgradeWatermelonHeight = 3469380 var UpgradeWatermelonHeight = abi.ChainEpoch(3469380)
// 2024-04-24T14:00:00Z
var UpgradeDragonHeight = abi.ChainEpoch(3855360)
// This epoch, 120 epochs after the "rest" of the nv22 upgrade, is when we switch to Drand quicknet
// 2024-04-11T15:00:00Z
var UpgradePhoenixHeight = UpgradeDragonHeight + 120
// This fix upgrade only ran on calibrationnet // This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFixHeight = -1 const UpgradeWatermelonFixHeight = -1
@ -112,9 +104,6 @@ const UpgradeWatermelonFixHeight = -1
// This fix upgrade only ran on calibrationnet // This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFix2Height = -2 const UpgradeWatermelonFix2Height = -2
// This fix upgrade only ran on calibrationnet
const UpgradeCalibrationDragonFixHeight = -3
var SupportedProofTypes = []abi.RegisteredSealProof{ var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg32GiBV1, abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1, abi.RegisteredSealProof_StackedDrg64GiBV1,
@ -130,10 +119,8 @@ func init() {
SetAddressNetwork(address.Mainnet) SetAddressNetwork(address.Mainnet)
} }
if os.Getenv("LOTUS_DISABLE_DRAGON") == "1" { if os.Getenv("LOTUS_DISABLE_WATERMELON") == "1" {
UpgradeDragonHeight = math.MaxInt64 - 1 UpgradeWatermelonHeight = math.MaxInt64
delete(DrandSchedule, UpgradePhoenixHeight)
UpgradePhoenixHeight = math.MaxInt64
} }
// NOTE: DO NOT change this unless you REALLY know what you're doing. This is not consensus critical, however, // NOTE: DO NOT change this unless you REALLY know what you're doing. This is not consensus critical, however,


@ -30,7 +30,7 @@ const AllowableClockDriftSecs = uint64(1)
/* inline-gen template /* inline-gen template
const TestNetworkVersion = network.Version{{.latestNetworkVersion}} const TestNetworkVersion = network.Version{{.latestNetworkVersion}}
/* inline-gen start */ /* inline-gen start */
const TestNetworkVersion = network.Version22 const TestNetworkVersion = network.Version21
/* inline-gen end */ /* inline-gen end */


@ -87,38 +87,34 @@ var (
UpgradeBreezeHeight abi.ChainEpoch = -1 UpgradeBreezeHeight abi.ChainEpoch = -1
BreezeGasTampingDuration abi.ChainEpoch = 0 BreezeGasTampingDuration abi.ChainEpoch = 0
UpgradeSmokeHeight abi.ChainEpoch = -1 UpgradeSmokeHeight abi.ChainEpoch = -1
UpgradeIgnitionHeight abi.ChainEpoch = -2 UpgradeIgnitionHeight abi.ChainEpoch = -2
UpgradeRefuelHeight abi.ChainEpoch = -3 UpgradeRefuelHeight abi.ChainEpoch = -3
UpgradeTapeHeight abi.ChainEpoch = -4 UpgradeTapeHeight abi.ChainEpoch = -4
UpgradeAssemblyHeight abi.ChainEpoch = 10 UpgradeAssemblyHeight abi.ChainEpoch = 10
UpgradeLiftoffHeight abi.ChainEpoch = -5 UpgradeLiftoffHeight abi.ChainEpoch = -5
UpgradeKumquatHeight abi.ChainEpoch = -6 UpgradeKumquatHeight abi.ChainEpoch = -6
UpgradeCalicoHeight abi.ChainEpoch = -8 UpgradeCalicoHeight abi.ChainEpoch = -8
UpgradePersianHeight abi.ChainEpoch = -9 UpgradePersianHeight abi.ChainEpoch = -9
UpgradeOrangeHeight abi.ChainEpoch = -10 UpgradeOrangeHeight abi.ChainEpoch = -10
UpgradeClausHeight abi.ChainEpoch = -11 UpgradeClausHeight abi.ChainEpoch = -11
UpgradeTrustHeight abi.ChainEpoch = -12 UpgradeTrustHeight abi.ChainEpoch = -12
UpgradeNorwegianHeight abi.ChainEpoch = -13 UpgradeNorwegianHeight abi.ChainEpoch = -13
UpgradeTurboHeight abi.ChainEpoch = -14 UpgradeTurboHeight abi.ChainEpoch = -14
UpgradeHyperdriveHeight abi.ChainEpoch = -15 UpgradeHyperdriveHeight abi.ChainEpoch = -15
UpgradeChocolateHeight abi.ChainEpoch = -16 UpgradeChocolateHeight abi.ChainEpoch = -16
UpgradeOhSnapHeight abi.ChainEpoch = -17 UpgradeOhSnapHeight abi.ChainEpoch = -17
UpgradeSkyrHeight abi.ChainEpoch = -18 UpgradeSkyrHeight abi.ChainEpoch = -18
UpgradeSharkHeight abi.ChainEpoch = -19 UpgradeSharkHeight abi.ChainEpoch = -19
UpgradeHyggeHeight abi.ChainEpoch = -20 UpgradeHyggeHeight abi.ChainEpoch = -20
UpgradeLightningHeight abi.ChainEpoch = -21 UpgradeLightningHeight abi.ChainEpoch = -21
UpgradeThunderHeight abi.ChainEpoch = -22 UpgradeThunderHeight abi.ChainEpoch = -22
UpgradeWatermelonHeight abi.ChainEpoch = -23 UpgradeWatermelonHeight abi.ChainEpoch = -23
UpgradeWatermelonFixHeight abi.ChainEpoch = -24 UpgradeWatermelonFixHeight abi.ChainEpoch = -24
UpgradeWatermelonFix2Height abi.ChainEpoch = -25 UpgradeWatermelonFix2Height abi.ChainEpoch = -25
UpgradeDragonHeight abi.ChainEpoch = -26
UpgradePhoenixHeight abi.ChainEpoch = -27
UpgradeCalibrationDragonFixHeight abi.ChainEpoch = -28
DrandSchedule = map[abi.ChainEpoch]DrandEnum{ DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet, 0: DrandMainnet,
UpgradePhoenixHeight: DrandQuicknet,
} }
GenesisNetworkVersion = network.Version0 GenesisNetworkVersion = network.Version0
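The constant block shows why the two trees diverge: the newer side adds Dragon, Phoenix and a calibration fix height, and extends DrandSchedule so that DrandQuicknet takes over at UpgradePhoenixHeight. A toy lookup over such a schedule, assuming (as the map above implies) that each key is the first epoch at which its drand network applies; this is an illustration, not the real build package's schedule handling:

package main

import "fmt"

type ChainEpoch int64
type DrandEnum int

const (
	DrandMainnet DrandEnum = iota
	DrandQuicknet
)

// drandForEpoch picks the schedule entry with the largest start epoch that
// is not after the given epoch.
func drandForEpoch(schedule map[ChainEpoch]DrandEnum, epoch ChainEpoch) DrandEnum {
	best := ChainEpoch(-1)
	var out DrandEnum
	for start, network := range schedule {
		if start <= epoch && start > best {
			best = start
			out = network
		}
	}
	return out
}

func main() {
	phoenix := ChainEpoch(120) // placeholder upgrade height
	schedule := map[ChainEpoch]DrandEnum{0: DrandMainnet, phoenix: DrandQuicknet}
	fmt.Println(drandForEpoch(schedule, 10))  // 0 (DrandMainnet)
	fmt.Println(drandForEpoch(schedule, 500)) // 1 (DrandQuicknet)
}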

View File

@ -37,7 +37,7 @@ func BuildTypeString() string {
} }
// BuildVersion is the local build version // BuildVersion is the local build version
const BuildVersion = "1.26.3" const BuildVersion = "1.25.0-rc5"
func UserVersion() string { func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {

View File

@ -6,7 +6,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
builtin13 "github.com/filecoin-project/go-state-types/builtin" builtin12 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@ -22,7 +22,7 @@ import (
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )
var Methods = builtin13.MethodsAccount var Methods = builtin12.MethodsAccount
func Load(store adt.Store, act *types.Actor) (State, error) { func Load(store adt.Store, act *types.Actor) (State, error) {
if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { if name, av, ok := actors.GetActorMetaByCode(act.Code); ok {
@ -47,9 +47,6 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version12: case actorstypes.Version12:
return load12(store, act.Head) return load12(store, act.Head)
case actorstypes.Version13:
return load13(store, act.Head)
} }
} }
@ -120,9 +117,6 @@ func MakeState(store adt.Store, av actorstypes.Version, addr address.Address) (S
case actorstypes.Version12: case actorstypes.Version12:
return make12(store, addr) return make12(store, addr)
case actorstypes.Version13:
return make13(store, addr)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -152,6 +146,5 @@ func AllCodes() []cid.Cid {
(&state10{}).Code(), (&state10{}).Code(),
(&state11{}).Code(), (&state11{}).Code(),
(&state12{}).Code(), (&state12{}).Code(),
(&state13{}).Code(),
} }
} }
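Every actor wrapper touched in this compare (account, cron, datacap, evm, init, market) changes the same way: the newer side imports the bundle as builtin13, adds a Version13 arm to Load and MakeState, and registers (&state13{}).Code() in AllCodes, while the older side stops at builtin12/Version12. The dispatch itself is just a switch on the actor version, sketched here with simplified types; the real Load first resolves the actor's code CID to a (name, version) pair, as the context lines above show:

package main

import "fmt"

type Version int

// State stands in for the per-actor state interface used in the diff above.
type State interface{ ActorVersion() Version }

type state12 struct{}
type state13 struct{}

func (state12) ActorVersion() Version { return 12 }
func (state13) ActorVersion() Version { return 13 }

// load dispatches on the actor version, the pattern every Load/MakeState
// function in this compare gains or loses an arm for.
func load(av Version) (State, error) {
	switch av {
	case 12:
		return state12{}, nil
	case 13:
		return state13{}, nil
	}
	return nil, fmt.Errorf("unknown actor version %d", av)
}

func main() {
	s, err := load(13)
	fmt.Println(s.ActorVersion(), err) // 13 <nil>
}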

View File

@ -1,62 +0,0 @@
package account
import (
"fmt"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
actorstypes "github.com/filecoin-project/go-state-types/actors"
account13 "github.com/filecoin-project/go-state-types/builtin/v13/account"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
)
var _ State = (*state13)(nil)
func load13(store adt.Store, root cid.Cid) (State, error) {
out := state13{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make13(store adt.Store, addr address.Address) (State, error) {
out := state13{store: store}
out.State = account13.State{Address: addr}
return &out, nil
}
type state13 struct {
account13.State
store adt.Store
}
func (s *state13) PubkeyAddress() (address.Address, error) {
return s.Address, nil
}
func (s *state13) GetState() interface{} {
return &s.State
}
func (s *state13) ActorKey() string {
return manifest.AccountKey
}
func (s *state13) ActorVersion() actorstypes.Version {
return actorstypes.Version13
}
func (s *state13) Code() cid.Cid {
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
if !ok {
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
}
return code
}

View File

@ -5,7 +5,7 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
builtin13 "github.com/filecoin-project/go-state-types/builtin" builtin12 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
@ -43,9 +43,6 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version12: case actorstypes.Version12:
return load12(store, act.Head) return load12(store, act.Head)
case actorstypes.Version13:
return load13(store, act.Head)
} }
} }
@ -116,16 +113,13 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) {
case actorstypes.Version12: case actorstypes.Version12:
return make12(store) return make12(store)
case actorstypes.Version13:
return make13(store)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
var ( var (
Address = builtin13.CronActorAddr Address = builtin12.CronActorAddr
Methods = builtin13.MethodsCron Methods = builtin12.MethodsCron
) )
type State interface { type State interface {
@ -150,6 +144,5 @@ func AllCodes() []cid.Cid {
(&state10{}).Code(), (&state10{}).Code(),
(&state11{}).Code(), (&state11{}).Code(),
(&state12{}).Code(), (&state12{}).Code(),
(&state13{}).Code(),
} }
} }

View File

@ -1,57 +0,0 @@
package cron
import (
"fmt"
"github.com/ipfs/go-cid"
actorstypes "github.com/filecoin-project/go-state-types/actors"
cron13 "github.com/filecoin-project/go-state-types/builtin/v13/cron"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
)
var _ State = (*state13)(nil)
func load13(store adt.Store, root cid.Cid) (State, error) {
out := state13{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make13(store adt.Store) (State, error) {
out := state13{store: store}
out.State = *cron13.ConstructState(cron13.BuiltInEntries())
return &out, nil
}
type state13 struct {
cron13.State
store adt.Store
}
func (s *state13) GetState() interface{} {
return &s.State
}
func (s *state13) ActorKey() string {
return manifest.CronKey
}
func (s *state13) ActorVersion() actorstypes.Version {
return actorstypes.Version13
}
func (s *state13) Code() cid.Cid {
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
if !ok {
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
}
return code
}

View File

@ -7,7 +7,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
builtin13 "github.com/filecoin-project/go-state-types/builtin" builtin12 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
@ -17,8 +17,8 @@ import (
) )
var ( var (
Address = builtin13.DatacapActorAddr Address = builtin12.DatacapActorAddr
Methods = builtin13.MethodsDatacap Methods = builtin12.MethodsDatacap
) )
func Load(store adt.Store, act *types.Actor) (State, error) { func Load(store adt.Store, act *types.Actor) (State, error) {
@ -41,9 +41,6 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version12: case actorstypes.Version12:
return load12(store, act.Head) return load12(store, act.Head)
case actorstypes.Version13:
return load13(store, act.Head)
} }
} }
@ -65,9 +62,6 @@ func MakeState(store adt.Store, av actorstypes.Version, governor address.Address
case actorstypes.Version12: case actorstypes.Version12:
return make12(store, governor, bitwidth) return make12(store, governor, bitwidth)
case actorstypes.Version13:
return make13(store, governor, bitwidth)
default: default:
return nil, xerrors.Errorf("datacap actor only valid for actors v9 and above, got %d", av) return nil, xerrors.Errorf("datacap actor only valid for actors v9 and above, got %d", av)
} }
@ -92,6 +86,5 @@ func AllCodes() []cid.Cid {
(&state10{}).Code(), (&state10{}).Code(),
(&state11{}).Code(), (&state11{}).Code(),
(&state12{}).Code(), (&state12{}).Code(),
(&state13{}).Code(),
} }
} }

View File

@ -1,82 +0,0 @@
package datacap
import (
"fmt"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
datacap13 "github.com/filecoin-project/go-state-types/builtin/v13/datacap"
adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
)
var _ State = (*state13)(nil)
func load13(store adt.Store, root cid.Cid) (State, error) {
out := state13{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make13(store adt.Store, governor address.Address, bitwidth uint64) (State, error) {
out := state13{store: store}
s, err := datacap13.ConstructState(store, governor, bitwidth)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state13 struct {
datacap13.State
store adt.Store
}
func (s *state13) Governor() (address.Address, error) {
return s.State.Governor, nil
}
func (s *state13) GetState() interface{} {
return &s.State
}
func (s *state13) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return forEachClient(s.store, actors.Version13, s.verifiedClients, cb)
}
func (s *state13) verifiedClients() (adt.Map, error) {
return adt13.AsMap(s.store, s.Token.Balances, int(s.Token.HamtBitWidth))
}
func (s *state13) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
return getDataCap(s.store, actors.Version13, s.verifiedClients, addr)
}
func (s *state13) ActorKey() string {
return manifest.DatacapKey
}
func (s *state13) ActorVersion() actorstypes.Version {
return actorstypes.Version13
}
func (s *state13) Code() cid.Cid {
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
if !ok {
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
}
return code
}

View File

@ -10,8 +10,6 @@ import (
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
builtin{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin" builtin{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin"
@ -19,18 +17,6 @@ import (
var Methods = builtin{{.latestVersion}}.MethodsEVM var Methods = builtin{{.latestVersion}}.MethodsEVM
// See https://github.com/filecoin-project/builtin-actors/blob/6e781444cee5965278c46ef4ffe1fb1970f18d7d/actors/evm/src/lib.rs#L35-L42
const (
ErrReverted exitcode.ExitCode = iota + 33 // EVM exit codes start at 33
ErrInvalidInstruction
ErrUndefinedInstruction
ErrStackUnderflow
ErrStackOverflow
ErrIllegalMemoryAccess
ErrBadJumpdest
ErrSelfdestructFailed
)
func Load(store adt.Store, act *types.Actor) (State, error) { func Load(store adt.Store, act *types.Actor) (State, error) {
if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { if name, av, ok := actors.GetActorMetaByCode(act.Code); ok {
if name != manifest.EvmKey { if name != manifest.EvmKey {

View File

@ -5,9 +5,8 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
builtin13 "github.com/filecoin-project/go-state-types/builtin" builtin12 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
@ -15,19 +14,7 @@ import (
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )
var Methods = builtin13.MethodsEVM var Methods = builtin12.MethodsEVM
// See https://github.com/filecoin-project/builtin-actors/blob/6e781444cee5965278c46ef4ffe1fb1970f18d7d/actors/evm/src/lib.rs#L35-L42
const (
ErrReverted exitcode.ExitCode = iota + 33 // EVM exit codes start at 33
ErrInvalidInstruction
ErrUndefinedInstruction
ErrStackUnderflow
ErrStackOverflow
ErrIllegalMemoryAccess
ErrBadJumpdest
ErrSelfdestructFailed
)
func Load(store adt.Store, act *types.Actor) (State, error) { func Load(store adt.Store, act *types.Actor) (State, error) {
if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { if name, av, ok := actors.GetActorMetaByCode(act.Code); ok {
@ -46,9 +33,6 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version12: case actorstypes.Version12:
return load12(store, act.Head) return load12(store, act.Head)
case actorstypes.Version13:
return load13(store, act.Head)
} }
} }
@ -67,9 +51,6 @@ func MakeState(store adt.Store, av actorstypes.Version, bytecode cid.Cid) (State
case actorstypes.Version12: case actorstypes.Version12:
return make12(store, bytecode) return make12(store, bytecode)
case actorstypes.Version13:
return make13(store, bytecode)
default: default:
return nil, xerrors.Errorf("evm actor only valid for actors v10 and above, got %d", av) return nil, xerrors.Errorf("evm actor only valid for actors v10 and above, got %d", av)
} }
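Alongside the builtin12/builtin13 swap, the newer side also defines the FEVM user exit codes in this package; the removed comment pins them to the builtin-actors source, with EVM failures starting at exit code 33 and assigned consecutively. A stand-alone restatement of that mapping, handy when classifying a failed message receipt (the ExitCode type here is a stand-in for go-state-types' exitcode.ExitCode):

package main

import "fmt"

type ExitCode int64 // stand-in for exitcode.ExitCode

// Same assignment as the removed const block: EVM exit codes start at 33.
const (
	ErrReverted ExitCode = iota + 33
	ErrInvalidInstruction
	ErrUndefinedInstruction
	ErrStackUnderflow
	ErrStackOverflow
	ErrIllegalMemoryAccess
	ErrBadJumpdest
	ErrSelfdestructFailed
)

// IsEVMReverted reports whether a receipt's exit code is the EVM "reverted"
// error, the case an Ethereum-style caller usually treats as a revert.
func IsEVMReverted(code ExitCode) bool {
	return code == ErrReverted
}

func main() {
	fmt.Println(ErrReverted, ErrSelfdestructFailed) // 33 40
	fmt.Println(IsEVMReverted(33))                  // true
}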

View File

@ -1,72 +0,0 @@
package evm
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi"
evm13 "github.com/filecoin-project/go-state-types/builtin/v13/evm"
"github.com/filecoin-project/lotus/chain/actors/adt"
)
var _ State = (*state13)(nil)
func load13(store adt.Store, root cid.Cid) (State, error) {
out := state13{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make13(store adt.Store, bytecode cid.Cid) (State, error) {
out := state13{store: store}
s, err := evm13.ConstructState(store, bytecode)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state13 struct {
evm13.State
store adt.Store
}
func (s *state13) Nonce() (uint64, error) {
return s.State.Nonce, nil
}
func (s *state13) IsAlive() (bool, error) {
return s.State.Tombstone == nil, nil
}
func (s *state13) GetState() interface{} {
return &s.State
}
func (s *state13) GetBytecodeCID() (cid.Cid, error) {
return s.State.Bytecode, nil
}
func (s *state13) GetBytecodeHash() ([32]byte, error) {
return s.State.BytecodeHash, nil
}
func (s *state13) GetBytecode() ([]byte, error) {
bc, err := s.GetBytecodeCID()
if err != nil {
return nil, err
}
var byteCode abi.CborBytesTransparent
if err := s.store.Get(s.store.Context(), bc, &byteCode); err != nil {
return nil, err
}
return byteCode, nil
}
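Most of this deleted adapter is pass-through accessors; the one two-step method is GetBytecode, which reads the bytecode CID out of the state object and then fetches the referenced block from the same store as raw bytes. A minimal stand-in with a map-backed store (string keys instead of real CIDs) to show the indirection; names here are illustrative only:

package main

import (
	"errors"
	"fmt"
)

// store is a toy stand-in for adt.Store: it maps a content key to raw bytes.
type store map[string][]byte

func (s store) Get(key string) ([]byte, error) {
	b, ok := s[key]
	if !ok {
		return nil, errors.New("block not found")
	}
	return b, nil
}

// evmState mirrors the shape used by the deleted adapter: the state holds a
// reference to the bytecode, not the bytecode itself.
type evmState struct {
	Bytecode string // stand-in for a cid.Cid
	Nonce    uint64
}

// GetBytecode resolves the reference and returns the raw contract bytecode.
func (st evmState) GetBytecode(s store) ([]byte, error) {
	return s.Get(st.Bytecode)
}

func main() {
	s := store{"bytecode-key": {0x60, 0x00, 0x60, 0x00}} // illustrative bytes
	st := evmState{Bytecode: "bytecode-key", Nonce: 1}
	code, err := st.GetBytecode(s)
	fmt.Println(code, err)
}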

View File

@ -7,7 +7,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
builtin13 "github.com/filecoin-project/go-state-types/builtin" builtin12 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@ -25,8 +25,8 @@ import (
) )
var ( var (
Address = builtin13.InitActorAddr Address = builtin12.InitActorAddr
Methods = builtin13.MethodsInit Methods = builtin12.MethodsInit
) )
func Load(store adt.Store, act *types.Actor) (State, error) { func Load(store adt.Store, act *types.Actor) (State, error) {
@ -52,9 +52,6 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version12: case actorstypes.Version12:
return load12(store, act.Head) return load12(store, act.Head)
case actorstypes.Version13:
return load13(store, act.Head)
} }
} }
@ -125,9 +122,6 @@ func MakeState(store adt.Store, av actorstypes.Version, networkName string) (Sta
case actorstypes.Version12: case actorstypes.Version12:
return make12(store, networkName) return make12(store, networkName)
case actorstypes.Version13:
return make13(store, networkName)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -180,6 +174,5 @@ func AllCodes() []cid.Cid {
(&state10{}).Code(), (&state10{}).Code(),
(&state11{}).Code(), (&state11{}).Code(),
(&state12{}).Code(), (&state12{}).Code(),
(&state13{}).Code(),
} }
} }

View File

@ -1,147 +0,0 @@
package init
import (
"crypto/sha256"
"fmt"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
builtin13 "github.com/filecoin-project/go-state-types/builtin"
init13 "github.com/filecoin-project/go-state-types/builtin/v13/init"
adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
var _ State = (*state13)(nil)
func load13(store adt.Store, root cid.Cid) (State, error) {
out := state13{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make13(store adt.Store, networkName string) (State, error) {
out := state13{store: store}
s, err := init13.ConstructState(store, networkName)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state13 struct {
init13.State
store adt.Store
}
func (s *state13) ResolveAddress(address address.Address) (address.Address, bool, error) {
return s.State.ResolveAddress(s.store, address)
}
func (s *state13) MapAddressToNewID(address address.Address) (address.Address, error) {
return s.State.MapAddressToNewID(s.store, address)
}
func (s *state13) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
addrs, err := adt13.AsMap(s.store, s.State.AddressMap, builtin13.DefaultHamtBitwidth)
if err != nil {
return err
}
var actorID cbg.CborInt
return addrs.ForEach(&actorID, func(key string) error {
addr, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
return cb(abi.ActorID(actorID), addr)
})
}
func (s *state13) NetworkName() (dtypes.NetworkName, error) {
return dtypes.NetworkName(s.State.NetworkName), nil
}
func (s *state13) SetNetworkName(name string) error {
s.State.NetworkName = name
return nil
}
func (s *state13) SetNextID(id abi.ActorID) error {
s.State.NextID = id
return nil
}
func (s *state13) Remove(addrs ...address.Address) (err error) {
m, err := adt13.AsMap(s.store, s.State.AddressMap, builtin13.DefaultHamtBitwidth)
if err != nil {
return err
}
for _, addr := range addrs {
if err = m.Delete(abi.AddrKey(addr)); err != nil {
return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
}
}
amr, err := m.Root()
if err != nil {
return xerrors.Errorf("failed to get address map root: %w", err)
}
s.State.AddressMap = amr
return nil
}
func (s *state13) SetAddressMap(mcid cid.Cid) error {
s.State.AddressMap = mcid
return nil
}
func (s *state13) GetState() interface{} {
return &s.State
}
func (s *state13) AddressMap() (adt.Map, error) {
return adt13.AsMap(s.store, s.State.AddressMap, builtin13.DefaultHamtBitwidth)
}
func (s *state13) AddressMapBitWidth() int {
return builtin13.DefaultHamtBitwidth
}
func (s *state13) AddressMapHashFunction() func(input []byte) []byte {
return func(input []byte) []byte {
res := sha256.Sum256(input)
return res[:]
}
}
func (s *state13) ActorKey() string {
return manifest.InitKey
}
func (s *state13) ActorVersion() actorstypes.Version {
return actorstypes.Version13
}
func (s *state13) Code() cid.Cid {
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
if !ok {
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
}
return code
}
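In the deleted init adapter the address map is a HAMT keyed by serialized robust addresses with actor IDs as values: ForEachActor opens it with the default bitwidth, iterates with a CborInt, and decodes each key back into an address, while Remove deletes keys and re-flushes the new root. A simplified consumer-side sketch of ForEachActor, with plain Go types standing in for the HAMT and the go-address/abi types:

package main

import "fmt"

// ActorID and Address stand in for abi.ActorID and go-address Address.
type ActorID uint64
type Address string

// initState models the piece of the deleted adapter used here: a map from
// robust address to actor ID, walked via a callback as in ForEachActor above.
type initState struct {
	addressMap map[Address]ActorID
}

func (s initState) ForEachActor(cb func(id ActorID, addr Address) error) error {
	for addr, id := range s.addressMap {
		if err := cb(id, addr); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	st := initState{addressMap: map[Address]ActorID{
		// Illustrative placeholders, not valid Filecoin addresses.
		"f1alice": 1001,
		"f1bob":   1002,
	}}

	// Build a reverse index ID -> address, a typical consumer of ForEachActor.
	byID := map[ActorID]Address{}
	_ = st.ForEachActor(func(id ActorID, addr Address) error {
		byID[id] = addr
		return nil
	})
	fmt.Println(byID)
}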

View File

@ -103,10 +103,10 @@ type BalanceTable interface {
type DealStates interface { type DealStates interface {
ForEach(cb func(id abi.DealID, ds DealState) error) error ForEach(cb func(id abi.DealID, ds DealState) error) error
Get(id abi.DealID) (DealState, bool, error) Get(id abi.DealID) (*DealState, bool, error)
array() adt.Array array() adt.Array
decode(*cbg.Deferred) (DealState, error) decode(*cbg.Deferred) (*DealState, error)
} }
type DealProposals interface { type DealProposals interface {
@ -142,17 +142,7 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora
type DealProposal = markettypes.DealProposal type DealProposal = markettypes.DealProposal
type DealLabel = markettypes.DealLabel type DealLabel = markettypes.DealLabel
type DealState interface { type DealState = markettypes.DealState
SectorStartEpoch() abi.ChainEpoch // -1 if not yet included in proven sector
LastUpdatedEpoch() abi.ChainEpoch // -1 if deal state never updated
SlashEpoch() abi.ChainEpoch // -1 if deal never slashed
Equals(other DealState) bool
}
func DealStatesEqual(a, b DealState) bool {
return DealStatesEqual(a, b)
}
type DealStateChanges struct { type DealStateChanges struct {
Added []DealIDState Added []DealIDState
@ -168,8 +158,8 @@ type DealIDState struct {
// DealStateChange is a change in deal state from -> to // DealStateChange is a change in deal state from -> to
type DealStateChange struct { type DealStateChange struct {
ID abi.DealID ID abi.DealID
From DealState From *DealState
To DealState To *DealState
} }
type DealProposalChanges struct { type DealProposalChanges struct {
@ -182,36 +172,12 @@ type ProposalIDState struct {
Proposal markettypes.DealProposal Proposal markettypes.DealProposal
} }
func EmptyDealState() *DealState {
type emptyDealState struct{} return &DealState{
SectorStartEpoch: -1,
func (e *emptyDealState) SectorStartEpoch() abi.ChainEpoch { SlashEpoch: -1,
return -1 LastUpdatedEpoch: -1,
}
func (e *emptyDealState) LastUpdatedEpoch() abi.ChainEpoch {
return -1
}
func (e *emptyDealState) SlashEpoch() abi.ChainEpoch {
return -1
}
func (e *emptyDealState) Equals(other DealState) bool {
if e.SectorStartEpoch() != other.SectorStartEpoch() {
return false
} }
if e.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if e.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
func EmptyDealState() DealState {
return &emptyDealState{}
} }
// returns the earned fees and pending fees for a given deal // returns the earned fees and pending fees for a given deal
@ -230,8 +196,8 @@ func GetDealFees(deal markettypes.DealProposal, height abi.ChainEpoch) (abi.Toke
return ef, big.Sub(tf, ef) return ef, big.Sub(tf, ef)
} }
func IsDealActive(state DealState) bool { func IsDealActive(state markettypes.DealState) bool {
return state.SectorStartEpoch() > -1 && state.SlashEpoch() == -1 return state.SectorStartEpoch > -1 && state.SlashEpoch == -1
} }
func labelFromGoString(s string) (markettypes.DealLabel, error) { func labelFromGoString(s string) (markettypes.DealLabel, error) {
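This is the substantive API difference between the two sides, and it repeats in the companion file below: the newer tree makes DealState an interface with accessor methods so each actors version can wrap its own struct, while the older tree keeps it as a plain alias for the go-state-types market DealState struct. Callers therefore read SectorStartEpoch() as a method on one side and SectorStartEpoch as a field on the other, exactly as the two forms of IsDealActive above show. A side-by-side sketch with stand-in types:

package main

import "fmt"

type ChainEpoch int64

// Struct form, as on the older side where DealState is a type alias.
type DealStateStruct struct {
	SectorStartEpoch ChainEpoch // -1 if not yet included in a proven sector
	LastUpdatedEpoch ChainEpoch // -1 if never updated
	SlashEpoch       ChainEpoch // -1 if never slashed
}

// Interface form, as on the newer side where each actors version wraps its
// own concrete struct behind accessor methods.
type DealStateIface interface {
	SectorStartEpoch() ChainEpoch
	SlashEpoch() ChainEpoch
}

type dealStateV13 struct{ s DealStateStruct }

func (d dealStateV13) SectorStartEpoch() ChainEpoch { return d.s.SectorStartEpoch }
func (d dealStateV13) SlashEpoch() ChainEpoch       { return d.s.SlashEpoch }

// The same predicate in both styles, mirroring IsDealActive in the diff.
func isActiveStruct(ds DealStateStruct) bool {
	return ds.SectorStartEpoch > -1 && ds.SlashEpoch == -1
}

func isActiveIface(ds DealStateIface) bool {
	return ds.SectorStartEpoch() > -1 && ds.SlashEpoch() == -1
}

func main() {
	ds := DealStateStruct{SectorStartEpoch: 10, LastUpdatedEpoch: -1, SlashEpoch: -1}
	fmt.Println(isActiveStruct(ds), isActiveIface(dealStateV13{ds})) // true true
}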

View File

@ -64,7 +64,7 @@ func (d *marketStatesDiffer) Add(key uint64, val *cbg.Deferred) error {
if err != nil { if err != nil {
return err return err
} }
d.Results.Added = append(d.Results.Added, DealIDState{abi.DealID(key), ds}) d.Results.Added = append(d.Results.Added, DealIDState{abi.DealID(key), *ds})
return nil return nil
} }
@ -77,7 +77,7 @@ func (d *marketStatesDiffer) Modify(key uint64, from, to *cbg.Deferred) error {
if err != nil { if err != nil {
return err return err
} }
if !dsFrom.Equals(dsTo) { if *dsFrom != *dsTo {
d.Results.Modified = append(d.Results.Modified, DealStateChange{abi.DealID(key), dsFrom, dsTo}) d.Results.Modified = append(d.Results.Modified, DealStateChange{abi.DealID(key), dsFrom, dsTo})
} }
return nil return nil
@ -88,6 +88,6 @@ func (d *marketStatesDiffer) Remove(key uint64, val *cbg.Deferred) error {
if err != nil { if err != nil {
return err return err
} }
d.Results.Removed = append(d.Results.Removed, DealIDState{abi.DealID(key), ds}) d.Results.Removed = append(d.Results.Removed, DealIDState{abi.DealID(key), *ds})
return nil return nil
} }

View File

@ -58,9 +58,6 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version12: case actorstypes.Version12:
return load12(store, act.Head) return load12(store, act.Head)
case actorstypes.Version13:
return load13(store, act.Head)
} }
} }
@ -131,9 +128,6 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) {
case actorstypes.Version12: case actorstypes.Version12:
return make12(store) return make12(store)
case actorstypes.Version13:
return make13(store)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -168,10 +162,10 @@ type BalanceTable interface {
type DealStates interface { type DealStates interface {
ForEach(cb func(id abi.DealID, ds DealState) error) error ForEach(cb func(id abi.DealID, ds DealState) error) error
Get(id abi.DealID) (DealState, bool, error) Get(id abi.DealID) (*DealState, bool, error)
array() adt.Array array() adt.Array
decode(*cbg.Deferred) (DealState, error) decode(*cbg.Deferred) (*DealState, error)
} }
type DealProposals interface { type DealProposals interface {
@ -232,9 +226,6 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora
case actorstypes.Version12: case actorstypes.Version12:
return decodePublishStorageDealsReturn12(b) return decodePublishStorageDealsReturn12(b)
case actorstypes.Version13:
return decodePublishStorageDealsReturn13(b)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -242,17 +233,7 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora
type DealProposal = markettypes.DealProposal type DealProposal = markettypes.DealProposal
type DealLabel = markettypes.DealLabel type DealLabel = markettypes.DealLabel
type DealState interface { type DealState = markettypes.DealState
SectorStartEpoch() abi.ChainEpoch // -1 if not yet included in proven sector
LastUpdatedEpoch() abi.ChainEpoch // -1 if deal state never updated
SlashEpoch() abi.ChainEpoch // -1 if deal never slashed
Equals(other DealState) bool
}
func DealStatesEqual(a, b DealState) bool {
return DealStatesEqual(a, b)
}
type DealStateChanges struct { type DealStateChanges struct {
Added []DealIDState Added []DealIDState
@ -268,8 +249,8 @@ type DealIDState struct {
// DealStateChange is a change in deal state from -> to // DealStateChange is a change in deal state from -> to
type DealStateChange struct { type DealStateChange struct {
ID abi.DealID ID abi.DealID
From DealState From *DealState
To DealState To *DealState
} }
type DealProposalChanges struct { type DealProposalChanges struct {
@ -282,35 +263,12 @@ type ProposalIDState struct {
Proposal markettypes.DealProposal Proposal markettypes.DealProposal
} }
type emptyDealState struct{} func EmptyDealState() *DealState {
return &DealState{
func (e *emptyDealState) SectorStartEpoch() abi.ChainEpoch { SectorStartEpoch: -1,
return -1 SlashEpoch: -1,
} LastUpdatedEpoch: -1,
func (e *emptyDealState) LastUpdatedEpoch() abi.ChainEpoch {
return -1
}
func (e *emptyDealState) SlashEpoch() abi.ChainEpoch {
return -1
}
func (e *emptyDealState) Equals(other DealState) bool {
if e.SectorStartEpoch() != other.SectorStartEpoch() {
return false
} }
if e.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if e.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
func EmptyDealState() DealState {
return &emptyDealState{}
} }
// returns the earned fees and pending fees for a given deal // returns the earned fees and pending fees for a given deal
@ -329,8 +287,8 @@ func GetDealFees(deal markettypes.DealProposal, height abi.ChainEpoch) (abi.Toke
return ef, big.Sub(tf, ef) return ef, big.Sub(tf, ef)
} }
func IsDealActive(state DealState) bool { func IsDealActive(state markettypes.DealState) bool {
return state.SectorStartEpoch() > -1 && state.SlashEpoch() == -1 return state.SectorStartEpoch > -1 && state.SlashEpoch == -1
} }
func labelFromGoString(s string) (markettypes.DealLabel, error) { func labelFromGoString(s string) (markettypes.DealLabel, error) {
@ -355,6 +313,5 @@ func AllCodes() []cid.Cid {
(&state10{}).Code(), (&state10{}).Code(),
(&state11{}).Code(), (&state11{}).Code(),
(&state12{}).Code(), (&state12{}).Code(),
(&state13{}).Code(),
} }
} }

View File

@ -175,7 +175,7 @@ type dealStates{{.v}} struct {
adt.Array adt.Array
} }
func (s *dealStates{{.v}}) Get(dealID abi.DealID) (DealState, bool, error) { func (s *dealStates{{.v}}) Get(dealID abi.DealID) (*DealState, bool, error) {
var deal{{.v}} market{{.v}}.DealState var deal{{.v}} market{{.v}}.DealState
found, err := s.Array.Get(uint64(dealID), &deal{{.v}}) found, err := s.Array.Get(uint64(dealID), &deal{{.v}})
if err != nil { if err != nil {
@ -185,7 +185,7 @@ func (s *dealStates{{.v}}) Get(dealID abi.DealID) (DealState, bool, error) {
return nil, false, nil return nil, false, nil
} }
deal := fromV{{.v}}DealState(deal{{.v}}) deal := fromV{{.v}}DealState(deal{{.v}})
return deal, true, nil return &deal, true, nil
} }
func (s *dealStates{{.v}}) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { func (s *dealStates{{.v}}) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
@ -195,57 +195,31 @@ func (s *dealStates{{.v}}) ForEach(cb func(dealID abi.DealID, ds DealState) erro
}) })
} }
func (s *dealStates{{.v}}) decode(val *cbg.Deferred) (DealState, error) { func (s *dealStates{{.v}}) decode(val *cbg.Deferred) (*DealState, error) {
var ds{{.v}} market{{.v}}.DealState var ds{{.v}} market{{.v}}.DealState
if err := ds{{.v}}.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { if err := ds{{.v}}.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err return nil, err
} }
ds := fromV{{.v}}DealState(ds{{.v}}) ds := fromV{{.v}}DealState(ds{{.v}})
return ds, nil return &ds, nil
} }
func (s *dealStates{{.v}}) array() adt.Array { func (s *dealStates{{.v}}) array() adt.Array {
return s.Array return s.Array
} }
type dealStateV{{.v}} struct {
ds{{.v}} market{{.v}}.DealState
}
func (d dealStateV{{.v}}) SectorStartEpoch() abi.ChainEpoch {
return d.ds{{.v}}.SectorStartEpoch
}
func (d dealStateV{{.v}}) LastUpdatedEpoch() abi.ChainEpoch {
return d.ds{{.v}}.LastUpdatedEpoch
}
func (d dealStateV{{.v}}) SlashEpoch() abi.ChainEpoch {
return d.ds{{.v}}.SlashEpoch
}
func (d dealStateV{{.v}}) Equals(other DealState) bool {
if ov{{.v}}, ok := other.(dealStateV{{.v}}); ok {
return d.ds{{.v}} == ov{{.v}}.ds{{.v}}
}
if d.SectorStartEpoch() != other.SectorStartEpoch() {
return false
}
if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if d.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
var _ DealState = (*dealStateV{{.v}})(nil)
func fromV{{.v}}DealState(v{{.v}} market{{.v}}.DealState) DealState { func fromV{{.v}}DealState(v{{.v}} market{{.v}}.DealState) DealState {
return dealStateV{{.v}}{v{{.v}}} ret := DealState{
SectorStartEpoch: v{{.v}}.SectorStartEpoch,
LastUpdatedEpoch: v{{.v}}.LastUpdatedEpoch,
SlashEpoch: v{{.v}}.SlashEpoch,
VerifiedClaim: 0,
}
{{if (ge .v 9)}}
ret.VerifiedClaim = verifregtypes.AllocationId(v{{.v}}.VerifiedClaim)
{{end}}
return ret
} }
type dealProposals{{.v}} struct { type dealProposals{{.v}} struct {
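This template is what the per-version state files below are generated from; the {{if (ge .v 9)}} guard is why fromV9DealState and later copy VerifiedClaim while earlier versions leave it at zero (compare the v0 and v10 expansions below). Go's text/template has ge built in, so the mechanism can be shown in a few lines; the snippet is a trimmed illustration, not the project's actual generator:

package main

import (
	"os"
	"text/template"
)

// A trimmed-down version of the conversion template above: the VerifiedClaim
// line is only emitted for actors v9 and later.
const src = `func fromV{{.v}}DealState(v{{.v}} market{{.v}}.DealState) DealState {
	ret := DealState{SectorStartEpoch: v{{.v}}.SectorStartEpoch}
{{if (ge .v 9)}}	ret.VerifiedClaim = verifregtypes.AllocationId(v{{.v}}.VerifiedClaim)
{{end}}	return ret
}
`

func main() {
	t := template.Must(template.New("deal").Parse(src))
	for _, v := range []int{8, 9} {
		// v8 omits the VerifiedClaim line, v9 keeps it.
		_ = t.Execute(os.Stdout, map[string]int{"v": v})
	}
}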

View File

@ -154,7 +154,7 @@ type dealStates0 struct {
adt.Array adt.Array
} }
func (s *dealStates0) Get(dealID abi.DealID) (DealState, bool, error) { func (s *dealStates0) Get(dealID abi.DealID) (*DealState, bool, error) {
var deal0 market0.DealState var deal0 market0.DealState
found, err := s.Array.Get(uint64(dealID), &deal0) found, err := s.Array.Get(uint64(dealID), &deal0)
if err != nil { if err != nil {
@ -164,7 +164,7 @@ func (s *dealStates0) Get(dealID abi.DealID) (DealState, bool, error) {
return nil, false, nil return nil, false, nil
} }
deal := fromV0DealState(deal0) deal := fromV0DealState(deal0)
return deal, true, nil return &deal, true, nil
} }
func (s *dealStates0) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { func (s *dealStates0) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
@ -174,57 +174,28 @@ func (s *dealStates0) ForEach(cb func(dealID abi.DealID, ds DealState) error) er
}) })
} }
func (s *dealStates0) decode(val *cbg.Deferred) (DealState, error) { func (s *dealStates0) decode(val *cbg.Deferred) (*DealState, error) {
var ds0 market0.DealState var ds0 market0.DealState
if err := ds0.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { if err := ds0.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err return nil, err
} }
ds := fromV0DealState(ds0) ds := fromV0DealState(ds0)
return ds, nil return &ds, nil
} }
func (s *dealStates0) array() adt.Array { func (s *dealStates0) array() adt.Array {
return s.Array return s.Array
} }
type dealStateV0 struct {
ds0 market0.DealState
}
func (d dealStateV0) SectorStartEpoch() abi.ChainEpoch {
return d.ds0.SectorStartEpoch
}
func (d dealStateV0) LastUpdatedEpoch() abi.ChainEpoch {
return d.ds0.LastUpdatedEpoch
}
func (d dealStateV0) SlashEpoch() abi.ChainEpoch {
return d.ds0.SlashEpoch
}
func (d dealStateV0) Equals(other DealState) bool {
if ov0, ok := other.(dealStateV0); ok {
return d.ds0 == ov0.ds0
}
if d.SectorStartEpoch() != other.SectorStartEpoch() {
return false
}
if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if d.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
var _ DealState = (*dealStateV0)(nil)
func fromV0DealState(v0 market0.DealState) DealState { func fromV0DealState(v0 market0.DealState) DealState {
return dealStateV0{v0} ret := DealState{
SectorStartEpoch: v0.SectorStartEpoch,
LastUpdatedEpoch: v0.LastUpdatedEpoch,
SlashEpoch: v0.SlashEpoch,
VerifiedClaim: 0,
}
return ret
} }
type dealProposals0 struct { type dealProposals0 struct {

View File

@ -153,7 +153,7 @@ type dealStates10 struct {
adt.Array adt.Array
} }
func (s *dealStates10) Get(dealID abi.DealID) (DealState, bool, error) { func (s *dealStates10) Get(dealID abi.DealID) (*DealState, bool, error) {
var deal10 market10.DealState var deal10 market10.DealState
found, err := s.Array.Get(uint64(dealID), &deal10) found, err := s.Array.Get(uint64(dealID), &deal10)
if err != nil { if err != nil {
@ -163,7 +163,7 @@ func (s *dealStates10) Get(dealID abi.DealID) (DealState, bool, error) {
return nil, false, nil return nil, false, nil
} }
deal := fromV10DealState(deal10) deal := fromV10DealState(deal10)
return deal, true, nil return &deal, true, nil
} }
func (s *dealStates10) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { func (s *dealStates10) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
@ -173,57 +173,30 @@ func (s *dealStates10) ForEach(cb func(dealID abi.DealID, ds DealState) error) e
}) })
} }
func (s *dealStates10) decode(val *cbg.Deferred) (DealState, error) { func (s *dealStates10) decode(val *cbg.Deferred) (*DealState, error) {
var ds10 market10.DealState var ds10 market10.DealState
if err := ds10.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { if err := ds10.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err return nil, err
} }
ds := fromV10DealState(ds10) ds := fromV10DealState(ds10)
return ds, nil return &ds, nil
} }
func (s *dealStates10) array() adt.Array { func (s *dealStates10) array() adt.Array {
return s.Array return s.Array
} }
type dealStateV10 struct {
ds10 market10.DealState
}
func (d dealStateV10) SectorStartEpoch() abi.ChainEpoch {
return d.ds10.SectorStartEpoch
}
func (d dealStateV10) LastUpdatedEpoch() abi.ChainEpoch {
return d.ds10.LastUpdatedEpoch
}
func (d dealStateV10) SlashEpoch() abi.ChainEpoch {
return d.ds10.SlashEpoch
}
func (d dealStateV10) Equals(other DealState) bool {
if ov10, ok := other.(dealStateV10); ok {
return d.ds10 == ov10.ds10
}
if d.SectorStartEpoch() != other.SectorStartEpoch() {
return false
}
if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if d.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
var _ DealState = (*dealStateV10)(nil)
func fromV10DealState(v10 market10.DealState) DealState { func fromV10DealState(v10 market10.DealState) DealState {
return dealStateV10{v10} ret := DealState{
SectorStartEpoch: v10.SectorStartEpoch,
LastUpdatedEpoch: v10.LastUpdatedEpoch,
SlashEpoch: v10.SlashEpoch,
VerifiedClaim: 0,
}
ret.VerifiedClaim = verifregtypes.AllocationId(v10.VerifiedClaim)
return ret
} }
type dealProposals10 struct { type dealProposals10 struct {

View File

@ -153,7 +153,7 @@ type dealStates11 struct {
adt.Array adt.Array
} }
func (s *dealStates11) Get(dealID abi.DealID) (DealState, bool, error) { func (s *dealStates11) Get(dealID abi.DealID) (*DealState, bool, error) {
var deal11 market11.DealState var deal11 market11.DealState
found, err := s.Array.Get(uint64(dealID), &deal11) found, err := s.Array.Get(uint64(dealID), &deal11)
if err != nil { if err != nil {
@ -163,7 +163,7 @@ func (s *dealStates11) Get(dealID abi.DealID) (DealState, bool, error) {
return nil, false, nil return nil, false, nil
} }
deal := fromV11DealState(deal11) deal := fromV11DealState(deal11)
return deal, true, nil return &deal, true, nil
} }
func (s *dealStates11) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { func (s *dealStates11) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
@ -173,57 +173,30 @@ func (s *dealStates11) ForEach(cb func(dealID abi.DealID, ds DealState) error) e
}) })
} }
func (s *dealStates11) decode(val *cbg.Deferred) (DealState, error) { func (s *dealStates11) decode(val *cbg.Deferred) (*DealState, error) {
var ds11 market11.DealState var ds11 market11.DealState
if err := ds11.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { if err := ds11.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err return nil, err
} }
ds := fromV11DealState(ds11) ds := fromV11DealState(ds11)
return ds, nil return &ds, nil
} }
func (s *dealStates11) array() adt.Array { func (s *dealStates11) array() adt.Array {
return s.Array return s.Array
} }
type dealStateV11 struct {
ds11 market11.DealState
}
func (d dealStateV11) SectorStartEpoch() abi.ChainEpoch {
return d.ds11.SectorStartEpoch
}
func (d dealStateV11) LastUpdatedEpoch() abi.ChainEpoch {
return d.ds11.LastUpdatedEpoch
}
func (d dealStateV11) SlashEpoch() abi.ChainEpoch {
return d.ds11.SlashEpoch
}
func (d dealStateV11) Equals(other DealState) bool {
if ov11, ok := other.(dealStateV11); ok {
return d.ds11 == ov11.ds11
}
if d.SectorStartEpoch() != other.SectorStartEpoch() {
return false
}
if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if d.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
var _ DealState = (*dealStateV11)(nil)
func fromV11DealState(v11 market11.DealState) DealState { func fromV11DealState(v11 market11.DealState) DealState {
return dealStateV11{v11} ret := DealState{
SectorStartEpoch: v11.SectorStartEpoch,
LastUpdatedEpoch: v11.LastUpdatedEpoch,
SlashEpoch: v11.SlashEpoch,
VerifiedClaim: 0,
}
ret.VerifiedClaim = verifregtypes.AllocationId(v11.VerifiedClaim)
return ret
} }
type dealProposals11 struct { type dealProposals11 struct {

View File

@ -153,7 +153,7 @@ type dealStates12 struct {
adt.Array adt.Array
} }
func (s *dealStates12) Get(dealID abi.DealID) (DealState, bool, error) { func (s *dealStates12) Get(dealID abi.DealID) (*DealState, bool, error) {
var deal12 market12.DealState var deal12 market12.DealState
found, err := s.Array.Get(uint64(dealID), &deal12) found, err := s.Array.Get(uint64(dealID), &deal12)
if err != nil { if err != nil {
@ -163,7 +163,7 @@ func (s *dealStates12) Get(dealID abi.DealID) (DealState, bool, error) {
return nil, false, nil return nil, false, nil
} }
deal := fromV12DealState(deal12) deal := fromV12DealState(deal12)
return deal, true, nil return &deal, true, nil
} }
func (s *dealStates12) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { func (s *dealStates12) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
@ -173,57 +173,30 @@ func (s *dealStates12) ForEach(cb func(dealID abi.DealID, ds DealState) error) e
}) })
} }
func (s *dealStates12) decode(val *cbg.Deferred) (DealState, error) { func (s *dealStates12) decode(val *cbg.Deferred) (*DealState, error) {
var ds12 market12.DealState var ds12 market12.DealState
if err := ds12.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { if err := ds12.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err return nil, err
} }
ds := fromV12DealState(ds12) ds := fromV12DealState(ds12)
return ds, nil return &ds, nil
} }
func (s *dealStates12) array() adt.Array { func (s *dealStates12) array() adt.Array {
return s.Array return s.Array
} }
type dealStateV12 struct {
ds12 market12.DealState
}
func (d dealStateV12) SectorStartEpoch() abi.ChainEpoch {
return d.ds12.SectorStartEpoch
}
func (d dealStateV12) LastUpdatedEpoch() abi.ChainEpoch {
return d.ds12.LastUpdatedEpoch
}
func (d dealStateV12) SlashEpoch() abi.ChainEpoch {
return d.ds12.SlashEpoch
}
func (d dealStateV12) Equals(other DealState) bool {
if ov12, ok := other.(dealStateV12); ok {
return d.ds12 == ov12.ds12
}
if d.SectorStartEpoch() != other.SectorStartEpoch() {
return false
}
if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if d.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
var _ DealState = (*dealStateV12)(nil)
func fromV12DealState(v12 market12.DealState) DealState { func fromV12DealState(v12 market12.DealState) DealState {
return dealStateV12{v12} ret := DealState{
SectorStartEpoch: v12.SectorStartEpoch,
LastUpdatedEpoch: v12.LastUpdatedEpoch,
SlashEpoch: v12.SlashEpoch,
VerifiedClaim: 0,
}
ret.VerifiedClaim = verifregtypes.AllocationId(v12.VerifiedClaim)
return ret
} }
type dealProposals12 struct { type dealProposals12 struct {

View File

@ -1,404 +0,0 @@
package market
import (
"bytes"
"fmt"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/builtin"
market13 "github.com/filecoin-project/go-state-types/builtin/v13/market"
adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt"
markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types"
)
var _ State = (*state13)(nil)
func load13(store adt.Store, root cid.Cid) (State, error) {
out := state13{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make13(store adt.Store) (State, error) {
out := state13{store: store}
s, err := market13.ConstructState(store)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state13 struct {
market13.State
store adt.Store
}
func (s *state13) TotalLocked() (abi.TokenAmount, error) {
fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
fml = types.BigAdd(fml, s.TotalClientStorageFee)
return fml, nil
}
func (s *state13) BalancesChanged(otherState State) (bool, error) {
otherState13, ok := otherState.(*state13)
if !ok {
// there's no way to compare different versions of the state, so let's
// just say that means the state of balances has changed
return true, nil
}
return !s.State.EscrowTable.Equals(otherState13.State.EscrowTable) || !s.State.LockedTable.Equals(otherState13.State.LockedTable), nil
}
func (s *state13) StatesChanged(otherState State) (bool, error) {
otherState13, ok := otherState.(*state13)
if !ok {
// there's no way to compare different versions of the state, so let's
// just say that means the state of balances has changed
return true, nil
}
return !s.State.States.Equals(otherState13.State.States), nil
}
func (s *state13) States() (DealStates, error) {
stateArray, err := adt13.AsArray(s.store, s.State.States, market13.StatesAmtBitwidth)
if err != nil {
return nil, err
}
return &dealStates13{stateArray}, nil
}
func (s *state13) ProposalsChanged(otherState State) (bool, error) {
otherState13, ok := otherState.(*state13)
if !ok {
// there's no way to compare different versions of the state, so let's
// just say that means the state of balances has changed
return true, nil
}
return !s.State.Proposals.Equals(otherState13.State.Proposals), nil
}
func (s *state13) Proposals() (DealProposals, error) {
proposalArray, err := adt13.AsArray(s.store, s.State.Proposals, market13.ProposalsAmtBitwidth)
if err != nil {
return nil, err
}
return &dealProposals13{proposalArray}, nil
}
func (s *state13) EscrowTable() (BalanceTable, error) {
bt, err := adt13.AsBalanceTable(s.store, s.State.EscrowTable)
if err != nil {
return nil, err
}
return &balanceTable13{bt}, nil
}
func (s *state13) LockedTable() (BalanceTable, error) {
bt, err := adt13.AsBalanceTable(s.store, s.State.LockedTable)
if err != nil {
return nil, err
}
return &balanceTable13{bt}, nil
}
func (s *state13) VerifyDealsForActivation(
minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
) (weight, verifiedWeight abi.DealWeight, err error) {
w, vw, _, err := market13.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
return w, vw, err
}
func (s *state13) NextID() (abi.DealID, error) {
return s.State.NextID, nil
}
type balanceTable13 struct {
*adt13.BalanceTable
}
func (bt *balanceTable13) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
asMap := (*adt13.Map)(bt.BalanceTable)
var ta abi.TokenAmount
return asMap.ForEach(&ta, func(key string) error {
a, err := address.NewFromBytes([]byte(key))
if err != nil {
return err
}
return cb(a, ta)
})
}
type dealStates13 struct {
adt.Array
}
func (s *dealStates13) Get(dealID abi.DealID) (DealState, bool, error) {
var deal13 market13.DealState
found, err := s.Array.Get(uint64(dealID), &deal13)
if err != nil {
return nil, false, err
}
if !found {
return nil, false, nil
}
deal := fromV13DealState(deal13)
return deal, true, nil
}
func (s *dealStates13) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
var ds13 market13.DealState
return s.Array.ForEach(&ds13, func(idx int64) error {
return cb(abi.DealID(idx), fromV13DealState(ds13))
})
}
func (s *dealStates13) decode(val *cbg.Deferred) (DealState, error) {
var ds13 market13.DealState
if err := ds13.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err
}
ds := fromV13DealState(ds13)
return ds, nil
}
func (s *dealStates13) array() adt.Array {
return s.Array
}
type dealStateV13 struct {
ds13 market13.DealState
}
func (d dealStateV13) SectorStartEpoch() abi.ChainEpoch {
return d.ds13.SectorStartEpoch
}
func (d dealStateV13) LastUpdatedEpoch() abi.ChainEpoch {
return d.ds13.LastUpdatedEpoch
}
func (d dealStateV13) SlashEpoch() abi.ChainEpoch {
return d.ds13.SlashEpoch
}
func (d dealStateV13) Equals(other DealState) bool {
if ov13, ok := other.(dealStateV13); ok {
return d.ds13 == ov13.ds13
}
if d.SectorStartEpoch() != other.SectorStartEpoch() {
return false
}
if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if d.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
var _ DealState = (*dealStateV13)(nil)
func fromV13DealState(v13 market13.DealState) DealState {
return dealStateV13{v13}
}
type dealProposals13 struct {
adt.Array
}
func (s *dealProposals13) Get(dealID abi.DealID) (*DealProposal, bool, error) {
var proposal13 market13.DealProposal
found, err := s.Array.Get(uint64(dealID), &proposal13)
if err != nil {
return nil, false, err
}
if !found {
return nil, false, nil
}
proposal, err := fromV13DealProposal(proposal13)
if err != nil {
return nil, true, xerrors.Errorf("decoding proposal: %w", err)
}
return &proposal, true, nil
}
func (s *dealProposals13) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
var dp13 market13.DealProposal
return s.Array.ForEach(&dp13, func(idx int64) error {
dp, err := fromV13DealProposal(dp13)
if err != nil {
return xerrors.Errorf("decoding proposal: %w", err)
}
return cb(abi.DealID(idx), dp)
})
}
func (s *dealProposals13) decode(val *cbg.Deferred) (*DealProposal, error) {
var dp13 market13.DealProposal
if err := dp13.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err
}
dp, err := fromV13DealProposal(dp13)
if err != nil {
return nil, err
}
return &dp, nil
}
func (s *dealProposals13) array() adt.Array {
return s.Array
}
func fromV13DealProposal(v13 market13.DealProposal) (DealProposal, error) {
label, err := fromV13Label(v13.Label)
if err != nil {
return DealProposal{}, xerrors.Errorf("error setting deal label: %w", err)
}
return DealProposal{
PieceCID: v13.PieceCID,
PieceSize: v13.PieceSize,
VerifiedDeal: v13.VerifiedDeal,
Client: v13.Client,
Provider: v13.Provider,
Label: label,
StartEpoch: v13.StartEpoch,
EndEpoch: v13.EndEpoch,
StoragePricePerEpoch: v13.StoragePricePerEpoch,
ProviderCollateral: v13.ProviderCollateral,
ClientCollateral: v13.ClientCollateral,
}, nil
}
func fromV13Label(v13 market13.DealLabel) (DealLabel, error) {
if v13.IsString() {
str, err := v13.ToString()
if err != nil {
return markettypes.EmptyDealLabel, xerrors.Errorf("failed to convert string label to string: %w", err)
}
return markettypes.NewLabelFromString(str)
}
bs, err := v13.ToBytes()
if err != nil {
return markettypes.EmptyDealLabel, xerrors.Errorf("failed to convert bytes label to bytes: %w", err)
}
return markettypes.NewLabelFromBytes(bs)
}
func (s *state13) GetState() interface{} {
return &s.State
}
var _ PublishStorageDealsReturn = (*publishStorageDealsReturn13)(nil)
func decodePublishStorageDealsReturn13(b []byte) (PublishStorageDealsReturn, error) {
var retval market13.PublishStorageDealsReturn
if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err)
}
return &publishStorageDealsReturn13{retval}, nil
}
type publishStorageDealsReturn13 struct {
market13.PublishStorageDealsReturn
}
func (r *publishStorageDealsReturn13) IsDealValid(index uint64) (bool, int, error) {
set, err := r.ValidDeals.IsSet(index)
if err != nil || !set {
return false, -1, err
}
maskBf, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{
Runs: []rlepluslazy.Run{rlepluslazy.Run{Val: true, Len: index}}})
if err != nil {
return false, -1, err
}
before, err := bitfield.IntersectBitField(maskBf, r.ValidDeals)
if err != nil {
return false, -1, err
}
outIdx, err := before.Count()
if err != nil {
return false, -1, err
}
return set, int(outIdx), nil
}
func (r *publishStorageDealsReturn13) DealIDs() ([]abi.DealID, error) {
return r.IDs, nil
}
func (s *state13) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) {
allocations, err := adt13.AsMap(s.store, s.PendingDealAllocationIds, builtin.DefaultHamtBitwidth)
if err != nil {
return verifregtypes.NoAllocationID, xerrors.Errorf("failed to load allocation id for %d: %w", dealId, err)
}
var allocationId cbg.CborInt
found, err := allocations.Get(abi.UIntKey(uint64(dealId)), &allocationId)
if err != nil {
return verifregtypes.NoAllocationID, xerrors.Errorf("failed to load allocation id for %d: %w", dealId, err)
}
if !found {
return verifregtypes.NoAllocationID, nil
}
return verifregtypes.AllocationId(allocationId), nil
}
func (s *state13) ActorKey() string {
return manifest.MarketKey
}
func (s *state13) ActorVersion() actorstypes.Version {
return actorstypes.Version13
}
func (s *state13) Code() cid.Cid {
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
if !ok {
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
}
return code
}
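The most involved logic in this deleted file is publishStorageDealsReturn13.IsDealValid: PublishStorageDeals returns one deal ID per valid input deal plus a bitfield marking which inputs were valid, so recovering the position of input index i in the IDs slice means counting how many valid bits precede i. The adapter does this by building a mask of the first i bits and intersecting it with ValidDeals; the arithmetic is easier to see on a plain bitmask (a hedged stand-in, not the go-bitfield API used above):

package main

import (
	"fmt"
	"math/bits"
)

// isDealValid mirrors the logic above on a uint64 bitmask: report whether
// input index i was a valid deal and, if so, its position in the returned
// deal-ID slice (the count of valid inputs before it).
func isDealValid(validDeals uint64, i uint) (bool, int) {
	if validDeals&(1<<i) == 0 {
		return false, -1
	}
	mask := uint64(1)<<i - 1                         // bits 0..i-1, the "before" mask
	return true, bits.OnesCount64(validDeals & mask) // valid inputs preceding i
}

func main() {
	// Inputs 0, 2 and 3 were valid; input 1 was rejected.
	valid := uint64(0b1101)
	dealIDs := []int{501, 502, 503} // one ID per valid input, in order

	ok, idx := isDealValid(valid, 3)
	fmt.Println(ok, idx, dealIDs[idx]) // true 2 503

	ok, idx = isDealValid(valid, 1)
	fmt.Println(ok, idx) // false -1
}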

View File

@ -154,7 +154,7 @@ type dealStates2 struct {
adt.Array adt.Array
} }
func (s *dealStates2) Get(dealID abi.DealID) (DealState, bool, error) { func (s *dealStates2) Get(dealID abi.DealID) (*DealState, bool, error) {
var deal2 market2.DealState var deal2 market2.DealState
found, err := s.Array.Get(uint64(dealID), &deal2) found, err := s.Array.Get(uint64(dealID), &deal2)
if err != nil { if err != nil {
@ -164,7 +164,7 @@ func (s *dealStates2) Get(dealID abi.DealID) (DealState, bool, error) {
return nil, false, nil return nil, false, nil
} }
deal := fromV2DealState(deal2) deal := fromV2DealState(deal2)
return deal, true, nil return &deal, true, nil
} }
func (s *dealStates2) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { func (s *dealStates2) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
@ -174,57 +174,28 @@ func (s *dealStates2) ForEach(cb func(dealID abi.DealID, ds DealState) error) er
}) })
} }
func (s *dealStates2) decode(val *cbg.Deferred) (DealState, error) { func (s *dealStates2) decode(val *cbg.Deferred) (*DealState, error) {
var ds2 market2.DealState var ds2 market2.DealState
if err := ds2.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { if err := ds2.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err return nil, err
} }
ds := fromV2DealState(ds2) ds := fromV2DealState(ds2)
return ds, nil return &ds, nil
} }
func (s *dealStates2) array() adt.Array { func (s *dealStates2) array() adt.Array {
return s.Array return s.Array
} }
type dealStateV2 struct {
ds2 market2.DealState
}
func (d dealStateV2) SectorStartEpoch() abi.ChainEpoch {
return d.ds2.SectorStartEpoch
}
func (d dealStateV2) LastUpdatedEpoch() abi.ChainEpoch {
return d.ds2.LastUpdatedEpoch
}
func (d dealStateV2) SlashEpoch() abi.ChainEpoch {
return d.ds2.SlashEpoch
}
func (d dealStateV2) Equals(other DealState) bool {
if ov2, ok := other.(dealStateV2); ok {
return d.ds2 == ov2.ds2
}
if d.SectorStartEpoch() != other.SectorStartEpoch() {
return false
}
if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if d.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
var _ DealState = (*dealStateV2)(nil)
func fromV2DealState(v2 market2.DealState) DealState { func fromV2DealState(v2 market2.DealState) DealState {
return dealStateV2{v2} ret := DealState{
SectorStartEpoch: v2.SectorStartEpoch,
LastUpdatedEpoch: v2.LastUpdatedEpoch,
SlashEpoch: v2.SlashEpoch,
VerifiedClaim: 0,
}
return ret
} }
type dealProposals2 struct { type dealProposals2 struct {

View File

@ -149,7 +149,7 @@ type dealStates3 struct {
adt.Array adt.Array
} }
func (s *dealStates3) Get(dealID abi.DealID) (DealState, bool, error) { func (s *dealStates3) Get(dealID abi.DealID) (*DealState, bool, error) {
var deal3 market3.DealState var deal3 market3.DealState
found, err := s.Array.Get(uint64(dealID), &deal3) found, err := s.Array.Get(uint64(dealID), &deal3)
if err != nil { if err != nil {
@ -159,7 +159,7 @@ func (s *dealStates3) Get(dealID abi.DealID) (DealState, bool, error) {
return nil, false, nil return nil, false, nil
} }
deal := fromV3DealState(deal3) deal := fromV3DealState(deal3)
return deal, true, nil return &deal, true, nil
} }
func (s *dealStates3) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { func (s *dealStates3) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
@ -169,57 +169,28 @@ func (s *dealStates3) ForEach(cb func(dealID abi.DealID, ds DealState) error) er
}) })
} }
func (s *dealStates3) decode(val *cbg.Deferred) (DealState, error) { func (s *dealStates3) decode(val *cbg.Deferred) (*DealState, error) {
var ds3 market3.DealState var ds3 market3.DealState
if err := ds3.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { if err := ds3.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err return nil, err
} }
ds := fromV3DealState(ds3) ds := fromV3DealState(ds3)
return ds, nil return &ds, nil
} }
func (s *dealStates3) array() adt.Array { func (s *dealStates3) array() adt.Array {
return s.Array return s.Array
} }
type dealStateV3 struct {
ds3 market3.DealState
}
func (d dealStateV3) SectorStartEpoch() abi.ChainEpoch {
return d.ds3.SectorStartEpoch
}
func (d dealStateV3) LastUpdatedEpoch() abi.ChainEpoch {
return d.ds3.LastUpdatedEpoch
}
func (d dealStateV3) SlashEpoch() abi.ChainEpoch {
return d.ds3.SlashEpoch
}
func (d dealStateV3) Equals(other DealState) bool {
if ov3, ok := other.(dealStateV3); ok {
return d.ds3 == ov3.ds3
}
if d.SectorStartEpoch() != other.SectorStartEpoch() {
return false
}
if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if d.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
var _ DealState = (*dealStateV3)(nil)
func fromV3DealState(v3 market3.DealState) DealState { func fromV3DealState(v3 market3.DealState) DealState {
return dealStateV3{v3} ret := DealState{
SectorStartEpoch: v3.SectorStartEpoch,
LastUpdatedEpoch: v3.LastUpdatedEpoch,
SlashEpoch: v3.SlashEpoch,
VerifiedClaim: 0,
}
return ret
} }
type dealProposals3 struct { type dealProposals3 struct {

View File

@ -149,7 +149,7 @@ type dealStates4 struct {
adt.Array adt.Array
} }
func (s *dealStates4) Get(dealID abi.DealID) (DealState, bool, error) { func (s *dealStates4) Get(dealID abi.DealID) (*DealState, bool, error) {
var deal4 market4.DealState var deal4 market4.DealState
found, err := s.Array.Get(uint64(dealID), &deal4) found, err := s.Array.Get(uint64(dealID), &deal4)
if err != nil { if err != nil {
@ -159,7 +159,7 @@ func (s *dealStates4) Get(dealID abi.DealID) (DealState, bool, error) {
return nil, false, nil return nil, false, nil
} }
deal := fromV4DealState(deal4) deal := fromV4DealState(deal4)
return deal, true, nil return &deal, true, nil
} }
func (s *dealStates4) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { func (s *dealStates4) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
@ -169,57 +169,28 @@ func (s *dealStates4) ForEach(cb func(dealID abi.DealID, ds DealState) error) er
}) })
} }
func (s *dealStates4) decode(val *cbg.Deferred) (DealState, error) { func (s *dealStates4) decode(val *cbg.Deferred) (*DealState, error) {
var ds4 market4.DealState var ds4 market4.DealState
if err := ds4.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { if err := ds4.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err return nil, err
} }
ds := fromV4DealState(ds4) ds := fromV4DealState(ds4)
return ds, nil return &ds, nil
} }
func (s *dealStates4) array() adt.Array { func (s *dealStates4) array() adt.Array {
return s.Array return s.Array
} }
type dealStateV4 struct {
ds4 market4.DealState
}
func (d dealStateV4) SectorStartEpoch() abi.ChainEpoch {
return d.ds4.SectorStartEpoch
}
func (d dealStateV4) LastUpdatedEpoch() abi.ChainEpoch {
return d.ds4.LastUpdatedEpoch
}
func (d dealStateV4) SlashEpoch() abi.ChainEpoch {
return d.ds4.SlashEpoch
}
func (d dealStateV4) Equals(other DealState) bool {
if ov4, ok := other.(dealStateV4); ok {
return d.ds4 == ov4.ds4
}
if d.SectorStartEpoch() != other.SectorStartEpoch() {
return false
}
if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if d.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
var _ DealState = (*dealStateV4)(nil)
func fromV4DealState(v4 market4.DealState) DealState { func fromV4DealState(v4 market4.DealState) DealState {
return dealStateV4{v4} ret := DealState{
SectorStartEpoch: v4.SectorStartEpoch,
LastUpdatedEpoch: v4.LastUpdatedEpoch,
SlashEpoch: v4.SlashEpoch,
VerifiedClaim: 0,
}
return ret
} }
type dealProposals4 struct { type dealProposals4 struct {

View File

@ -149,7 +149,7 @@ type dealStates5 struct {
adt.Array adt.Array
} }
func (s *dealStates5) Get(dealID abi.DealID) (DealState, bool, error) { func (s *dealStates5) Get(dealID abi.DealID) (*DealState, bool, error) {
var deal5 market5.DealState var deal5 market5.DealState
found, err := s.Array.Get(uint64(dealID), &deal5) found, err := s.Array.Get(uint64(dealID), &deal5)
if err != nil { if err != nil {
@ -159,7 +159,7 @@ func (s *dealStates5) Get(dealID abi.DealID) (DealState, bool, error) {
return nil, false, nil return nil, false, nil
} }
deal := fromV5DealState(deal5) deal := fromV5DealState(deal5)
return deal, true, nil return &deal, true, nil
} }
func (s *dealStates5) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { func (s *dealStates5) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
@ -169,57 +169,28 @@ func (s *dealStates5) ForEach(cb func(dealID abi.DealID, ds DealState) error) er
}) })
} }
func (s *dealStates5) decode(val *cbg.Deferred) (DealState, error) { func (s *dealStates5) decode(val *cbg.Deferred) (*DealState, error) {
var ds5 market5.DealState var ds5 market5.DealState
if err := ds5.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { if err := ds5.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err return nil, err
} }
ds := fromV5DealState(ds5) ds := fromV5DealState(ds5)
return ds, nil return &ds, nil
} }
func (s *dealStates5) array() adt.Array { func (s *dealStates5) array() adt.Array {
return s.Array return s.Array
} }
type dealStateV5 struct {
ds5 market5.DealState
}
func (d dealStateV5) SectorStartEpoch() abi.ChainEpoch {
return d.ds5.SectorStartEpoch
}
func (d dealStateV5) LastUpdatedEpoch() abi.ChainEpoch {
return d.ds5.LastUpdatedEpoch
}
func (d dealStateV5) SlashEpoch() abi.ChainEpoch {
return d.ds5.SlashEpoch
}
func (d dealStateV5) Equals(other DealState) bool {
if ov5, ok := other.(dealStateV5); ok {
return d.ds5 == ov5.ds5
}
if d.SectorStartEpoch() != other.SectorStartEpoch() {
return false
}
if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if d.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
var _ DealState = (*dealStateV5)(nil)
func fromV5DealState(v5 market5.DealState) DealState { func fromV5DealState(v5 market5.DealState) DealState {
return dealStateV5{v5} ret := DealState{
SectorStartEpoch: v5.SectorStartEpoch,
LastUpdatedEpoch: v5.LastUpdatedEpoch,
SlashEpoch: v5.SlashEpoch,
VerifiedClaim: 0,
}
return ret
} }
type dealProposals5 struct { type dealProposals5 struct {

View File

@ -151,7 +151,7 @@ type dealStates6 struct {
adt.Array adt.Array
} }
func (s *dealStates6) Get(dealID abi.DealID) (DealState, bool, error) { func (s *dealStates6) Get(dealID abi.DealID) (*DealState, bool, error) {
var deal6 market6.DealState var deal6 market6.DealState
found, err := s.Array.Get(uint64(dealID), &deal6) found, err := s.Array.Get(uint64(dealID), &deal6)
if err != nil { if err != nil {
@ -161,7 +161,7 @@ func (s *dealStates6) Get(dealID abi.DealID) (DealState, bool, error) {
return nil, false, nil return nil, false, nil
} }
deal := fromV6DealState(deal6) deal := fromV6DealState(deal6)
return deal, true, nil return &deal, true, nil
} }
func (s *dealStates6) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { func (s *dealStates6) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
@ -171,57 +171,28 @@ func (s *dealStates6) ForEach(cb func(dealID abi.DealID, ds DealState) error) er
}) })
} }
func (s *dealStates6) decode(val *cbg.Deferred) (DealState, error) { func (s *dealStates6) decode(val *cbg.Deferred) (*DealState, error) {
var ds6 market6.DealState var ds6 market6.DealState
if err := ds6.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { if err := ds6.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err return nil, err
} }
ds := fromV6DealState(ds6) ds := fromV6DealState(ds6)
return ds, nil return &ds, nil
} }
func (s *dealStates6) array() adt.Array { func (s *dealStates6) array() adt.Array {
return s.Array return s.Array
} }
type dealStateV6 struct {
ds6 market6.DealState
}
func (d dealStateV6) SectorStartEpoch() abi.ChainEpoch {
return d.ds6.SectorStartEpoch
}
func (d dealStateV6) LastUpdatedEpoch() abi.ChainEpoch {
return d.ds6.LastUpdatedEpoch
}
func (d dealStateV6) SlashEpoch() abi.ChainEpoch {
return d.ds6.SlashEpoch
}
func (d dealStateV6) Equals(other DealState) bool {
if ov6, ok := other.(dealStateV6); ok {
return d.ds6 == ov6.ds6
}
if d.SectorStartEpoch() != other.SectorStartEpoch() {
return false
}
if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if d.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
var _ DealState = (*dealStateV6)(nil)
func fromV6DealState(v6 market6.DealState) DealState { func fromV6DealState(v6 market6.DealState) DealState {
return dealStateV6{v6} ret := DealState{
SectorStartEpoch: v6.SectorStartEpoch,
LastUpdatedEpoch: v6.LastUpdatedEpoch,
SlashEpoch: v6.SlashEpoch,
VerifiedClaim: 0,
}
return ret
} }
type dealProposals6 struct { type dealProposals6 struct {

View File

@ -151,7 +151,7 @@ type dealStates7 struct {
adt.Array adt.Array
} }
func (s *dealStates7) Get(dealID abi.DealID) (DealState, bool, error) { func (s *dealStates7) Get(dealID abi.DealID) (*DealState, bool, error) {
var deal7 market7.DealState var deal7 market7.DealState
found, err := s.Array.Get(uint64(dealID), &deal7) found, err := s.Array.Get(uint64(dealID), &deal7)
if err != nil { if err != nil {
@ -161,7 +161,7 @@ func (s *dealStates7) Get(dealID abi.DealID) (DealState, bool, error) {
return nil, false, nil return nil, false, nil
} }
deal := fromV7DealState(deal7) deal := fromV7DealState(deal7)
return deal, true, nil return &deal, true, nil
} }
func (s *dealStates7) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { func (s *dealStates7) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
@ -171,57 +171,28 @@ func (s *dealStates7) ForEach(cb func(dealID abi.DealID, ds DealState) error) er
}) })
} }
func (s *dealStates7) decode(val *cbg.Deferred) (DealState, error) { func (s *dealStates7) decode(val *cbg.Deferred) (*DealState, error) {
var ds7 market7.DealState var ds7 market7.DealState
if err := ds7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { if err := ds7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err return nil, err
} }
ds := fromV7DealState(ds7) ds := fromV7DealState(ds7)
return ds, nil return &ds, nil
} }
func (s *dealStates7) array() adt.Array { func (s *dealStates7) array() adt.Array {
return s.Array return s.Array
} }
type dealStateV7 struct {
ds7 market7.DealState
}
func (d dealStateV7) SectorStartEpoch() abi.ChainEpoch {
return d.ds7.SectorStartEpoch
}
func (d dealStateV7) LastUpdatedEpoch() abi.ChainEpoch {
return d.ds7.LastUpdatedEpoch
}
func (d dealStateV7) SlashEpoch() abi.ChainEpoch {
return d.ds7.SlashEpoch
}
func (d dealStateV7) Equals(other DealState) bool {
if ov7, ok := other.(dealStateV7); ok {
return d.ds7 == ov7.ds7
}
if d.SectorStartEpoch() != other.SectorStartEpoch() {
return false
}
if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if d.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
var _ DealState = (*dealStateV7)(nil)
func fromV7DealState(v7 market7.DealState) DealState { func fromV7DealState(v7 market7.DealState) DealState {
return dealStateV7{v7} ret := DealState{
SectorStartEpoch: v7.SectorStartEpoch,
LastUpdatedEpoch: v7.LastUpdatedEpoch,
SlashEpoch: v7.SlashEpoch,
VerifiedClaim: 0,
}
return ret
} }
type dealProposals7 struct { type dealProposals7 struct {

View File

@ -152,7 +152,7 @@ type dealStates8 struct {
adt.Array adt.Array
} }
func (s *dealStates8) Get(dealID abi.DealID) (DealState, bool, error) { func (s *dealStates8) Get(dealID abi.DealID) (*DealState, bool, error) {
var deal8 market8.DealState var deal8 market8.DealState
found, err := s.Array.Get(uint64(dealID), &deal8) found, err := s.Array.Get(uint64(dealID), &deal8)
if err != nil { if err != nil {
@ -162,7 +162,7 @@ func (s *dealStates8) Get(dealID abi.DealID) (DealState, bool, error) {
return nil, false, nil return nil, false, nil
} }
deal := fromV8DealState(deal8) deal := fromV8DealState(deal8)
return deal, true, nil return &deal, true, nil
} }
func (s *dealStates8) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { func (s *dealStates8) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
@ -172,57 +172,28 @@ func (s *dealStates8) ForEach(cb func(dealID abi.DealID, ds DealState) error) er
}) })
} }
func (s *dealStates8) decode(val *cbg.Deferred) (DealState, error) { func (s *dealStates8) decode(val *cbg.Deferred) (*DealState, error) {
var ds8 market8.DealState var ds8 market8.DealState
if err := ds8.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { if err := ds8.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err return nil, err
} }
ds := fromV8DealState(ds8) ds := fromV8DealState(ds8)
return ds, nil return &ds, nil
} }
func (s *dealStates8) array() adt.Array { func (s *dealStates8) array() adt.Array {
return s.Array return s.Array
} }
type dealStateV8 struct {
ds8 market8.DealState
}
func (d dealStateV8) SectorStartEpoch() abi.ChainEpoch {
return d.ds8.SectorStartEpoch
}
func (d dealStateV8) LastUpdatedEpoch() abi.ChainEpoch {
return d.ds8.LastUpdatedEpoch
}
func (d dealStateV8) SlashEpoch() abi.ChainEpoch {
return d.ds8.SlashEpoch
}
func (d dealStateV8) Equals(other DealState) bool {
if ov8, ok := other.(dealStateV8); ok {
return d.ds8 == ov8.ds8
}
if d.SectorStartEpoch() != other.SectorStartEpoch() {
return false
}
if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if d.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
var _ DealState = (*dealStateV8)(nil)
func fromV8DealState(v8 market8.DealState) DealState { func fromV8DealState(v8 market8.DealState) DealState {
return dealStateV8{v8} ret := DealState{
SectorStartEpoch: v8.SectorStartEpoch,
LastUpdatedEpoch: v8.LastUpdatedEpoch,
SlashEpoch: v8.SlashEpoch,
VerifiedClaim: 0,
}
return ret
} }
type dealProposals8 struct { type dealProposals8 struct {

View File

@ -153,7 +153,7 @@ type dealStates9 struct {
adt.Array adt.Array
} }
func (s *dealStates9) Get(dealID abi.DealID) (DealState, bool, error) { func (s *dealStates9) Get(dealID abi.DealID) (*DealState, bool, error) {
var deal9 market9.DealState var deal9 market9.DealState
found, err := s.Array.Get(uint64(dealID), &deal9) found, err := s.Array.Get(uint64(dealID), &deal9)
if err != nil { if err != nil {
@ -163,7 +163,7 @@ func (s *dealStates9) Get(dealID abi.DealID) (DealState, bool, error) {
return nil, false, nil return nil, false, nil
} }
deal := fromV9DealState(deal9) deal := fromV9DealState(deal9)
return deal, true, nil return &deal, true, nil
} }
func (s *dealStates9) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { func (s *dealStates9) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
@ -173,57 +173,30 @@ func (s *dealStates9) ForEach(cb func(dealID abi.DealID, ds DealState) error) er
}) })
} }
func (s *dealStates9) decode(val *cbg.Deferred) (DealState, error) { func (s *dealStates9) decode(val *cbg.Deferred) (*DealState, error) {
var ds9 market9.DealState var ds9 market9.DealState
if err := ds9.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { if err := ds9.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
return nil, err return nil, err
} }
ds := fromV9DealState(ds9) ds := fromV9DealState(ds9)
return ds, nil return &ds, nil
} }
func (s *dealStates9) array() adt.Array { func (s *dealStates9) array() adt.Array {
return s.Array return s.Array
} }
type dealStateV9 struct {
ds9 market9.DealState
}
func (d dealStateV9) SectorStartEpoch() abi.ChainEpoch {
return d.ds9.SectorStartEpoch
}
func (d dealStateV9) LastUpdatedEpoch() abi.ChainEpoch {
return d.ds9.LastUpdatedEpoch
}
func (d dealStateV9) SlashEpoch() abi.ChainEpoch {
return d.ds9.SlashEpoch
}
func (d dealStateV9) Equals(other DealState) bool {
if ov9, ok := other.(dealStateV9); ok {
return d.ds9 == ov9.ds9
}
if d.SectorStartEpoch() != other.SectorStartEpoch() {
return false
}
if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
return false
}
if d.SlashEpoch() != other.SlashEpoch() {
return false
}
return true
}
var _ DealState = (*dealStateV9)(nil)
func fromV9DealState(v9 market9.DealState) DealState { func fromV9DealState(v9 market9.DealState) DealState {
return dealStateV9{v9} ret := DealState{
SectorStartEpoch: v9.SectorStartEpoch,
LastUpdatedEpoch: v9.LastUpdatedEpoch,
SlashEpoch: v9.SlashEpoch,
VerifiedClaim: 0,
}
ret.VerifiedClaim = verifregtypes.AllocationId(v9.VerifiedClaim)
return ret
} }
type dealProposals9 struct { type dealProposals9 struct {
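
The one substantive difference from the v2 through v8 conversions above: from market v9 onward the on-chain deal state records the verified-allocation claim, which the conversion forwards (cast to verifregtypes.AllocationId) instead of zeroing. A one-line check a caller might use against this branch's struct form; the helper is illustrative and not part of the diff.

import verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"

// sketch: a deal is backed by a verified allocation iff its claim id is non-zero
func hasVerifiedClaim(claim verifregtypes.AllocationId) bool {
	return claim != verifregtypes.NoAllocationID
}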

View File

@ -17,7 +17,6 @@ import (
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
minertypes13 "github.com/filecoin-project/go-state-types/builtin/v13/miner"
minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
@ -240,9 +239,7 @@ type DeclareFaultsParams = minertypes.DeclareFaultsParams
type ProveCommitAggregateParams = minertypes.ProveCommitAggregateParams type ProveCommitAggregateParams = minertypes.ProveCommitAggregateParams
type ProveCommitSectorParams = minertypes.ProveCommitSectorParams type ProveCommitSectorParams = minertypes.ProveCommitSectorParams
type ProveReplicaUpdatesParams = minertypes.ProveReplicaUpdatesParams type ProveReplicaUpdatesParams = minertypes.ProveReplicaUpdatesParams
type ProveReplicaUpdatesParams2 = minertypes.ProveReplicaUpdatesParams2
type ReplicaUpdate = minertypes.ReplicaUpdate type ReplicaUpdate = minertypes.ReplicaUpdate
type ReplicaUpdate2 = minertypes.ReplicaUpdate2
type PreCommitSectorBatchParams = minertypes.PreCommitSectorBatchParams type PreCommitSectorBatchParams = minertypes.PreCommitSectorBatchParams
type PreCommitSectorBatchParams2 = minertypes.PreCommitSectorBatchParams2 type PreCommitSectorBatchParams2 = minertypes.PreCommitSectorBatchParams2
type ExtendSectorExpiration2Params = minertypes.ExtendSectorExpiration2Params type ExtendSectorExpiration2Params = minertypes.ExtendSectorExpiration2Params
@ -251,12 +248,6 @@ type ExpirationExtension2 = minertypes.ExpirationExtension2
type CompactPartitionsParams = minertypes.CompactPartitionsParams type CompactPartitionsParams = minertypes.CompactPartitionsParams
type WithdrawBalanceParams = minertypes.WithdrawBalanceParams type WithdrawBalanceParams = minertypes.WithdrawBalanceParams
type PieceActivationManifest = minertypes13.PieceActivationManifest
type ProveCommitSectors3Params = minertypes13.ProveCommitSectors3Params
type SectorActivationManifest = minertypes13.SectorActivationManifest
type ProveReplicaUpdates3Params = minertypes13.ProveReplicaUpdates3Params
type SectorUpdateManifest = minertypes13.SectorUpdateManifest
var QAPowerMax = minertypes.QAPowerMax var QAPowerMax = minertypes.QAPowerMax
type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo

View File

@ -9,7 +9,6 @@ import (
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
minertypes13 "github.com/filecoin-project/go-state-types/builtin/v13/miner"
minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner"
"github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/dline"
@ -52,9 +51,6 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version12: case actorstypes.Version12:
return load12(store, act.Head) return load12(store, act.Head)
case actorstypes.Version13:
return load13(store, act.Head)
} }
} }
@ -125,9 +121,6 @@ func MakeState(store adt.Store, av actors.Version) (State, error) {
case actors.Version12: case actors.Version12:
return make12(store) return make12(store)
case actors.Version13:
return make13(store)
} }
return nil, xerrors.Errorf("unknown actor version %d", av) return nil, xerrors.Errorf("unknown actor version %d", av)
} }
@ -306,9 +299,7 @@ type DeclareFaultsParams = minertypes.DeclareFaultsParams
type ProveCommitAggregateParams = minertypes.ProveCommitAggregateParams type ProveCommitAggregateParams = minertypes.ProveCommitAggregateParams
type ProveCommitSectorParams = minertypes.ProveCommitSectorParams type ProveCommitSectorParams = minertypes.ProveCommitSectorParams
type ProveReplicaUpdatesParams = minertypes.ProveReplicaUpdatesParams type ProveReplicaUpdatesParams = minertypes.ProveReplicaUpdatesParams
type ProveReplicaUpdatesParams2 = minertypes.ProveReplicaUpdatesParams2
type ReplicaUpdate = minertypes.ReplicaUpdate type ReplicaUpdate = minertypes.ReplicaUpdate
type ReplicaUpdate2 = minertypes.ReplicaUpdate2
type PreCommitSectorBatchParams = minertypes.PreCommitSectorBatchParams type PreCommitSectorBatchParams = minertypes.PreCommitSectorBatchParams
type PreCommitSectorBatchParams2 = minertypes.PreCommitSectorBatchParams2 type PreCommitSectorBatchParams2 = minertypes.PreCommitSectorBatchParams2
type ExtendSectorExpiration2Params = minertypes.ExtendSectorExpiration2Params type ExtendSectorExpiration2Params = minertypes.ExtendSectorExpiration2Params
@ -317,12 +308,6 @@ type ExpirationExtension2 = minertypes.ExpirationExtension2
type CompactPartitionsParams = minertypes.CompactPartitionsParams type CompactPartitionsParams = minertypes.CompactPartitionsParams
type WithdrawBalanceParams = minertypes.WithdrawBalanceParams type WithdrawBalanceParams = minertypes.WithdrawBalanceParams
type PieceActivationManifest = minertypes13.PieceActivationManifest
type ProveCommitSectors3Params = minertypes13.ProveCommitSectors3Params
type SectorActivationManifest = minertypes13.SectorActivationManifest
type ProveReplicaUpdates3Params = minertypes13.ProveReplicaUpdates3Params
type SectorUpdateManifest = minertypes13.SectorUpdateManifest
var QAPowerMax = minertypes.QAPowerMax var QAPowerMax = minertypes.QAPowerMax
type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo
@ -389,6 +374,5 @@ func AllCodes() []cid.Cid {
(&state10{}).Code(), (&state10{}).Code(),
(&state11{}).Code(), (&state11{}).Code(),
(&state12{}).Code(), (&state12{}).Code(),
(&state13{}).Code(),
} }
} }
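
With Version13 dropped from Load, MakeState and AllCodes on this branch, the highest miner state this fork will decode is v12. AllCodes is also the natural way to answer "is this code CID a miner actor at any supported version"; a minimal sketch follows, where the isMinerActor helper is illustrative rather than part of the package.

import (
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

// sketch: check whether an actor's code CID matches any supported miner version
func isMinerActor(code cid.Cid) bool {
	for _, c := range miner.AllCodes() {
		if c.Equals(code) {
			return true
		}
	}
	return false
}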

View File

@ -72,7 +72,7 @@ func (s *state{{.v}}) AvailableBalance(bal abi.TokenAmount) (available abi.Token
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesn't have enough funds to cover their locked pledge // this panics if the miner doesnt have enough funds to cover their locked pledge
available{{if (ge .v 2)}}, err{{end}} = s.GetAvailableBalance(bal) available{{if (ge .v 2)}}, err{{end}} = s.GetAvailableBalance(bal)
return available, err return available, err
} }
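
The AvailableBalance template above, and the per-version copies that follow, all rely on the same Go idiom: GetAvailableBalance panics when locked funds exceed the balance, so a deferred recover writes into the named return values and turns the panic into an ordinary error. A generic, self-contained sketch of just that idiom (not the Lotus code itself):

package main

import "fmt"

// safeCall converts a panic in f into an error via named returns and a deferred recover.
func safeCall(f func() int64) (v int64, err error) {
	defer func() {
		if r := recover(); r != nil {
			v = 0
			err = fmt.Errorf("recovered from panic: %v", r)
		}
	}()
	v = f() // may panic; the deferred recover rewrites the return values
	return v, err
}

func main() {
	_, err := safeCall(func() int64 { panic("locked funds exceed balance") })
	fmt.Println(err) // recovered from panic: locked funds exceed balance
}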

View File

@ -62,7 +62,7 @@ func (s *state0) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesn't have enough funds to cover their locked pledge // this panics if the miner doesnt have enough funds to cover their locked pledge
available = s.GetAvailableBalance(bal) available = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state10) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmou
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesn't have enough funds to cover their locked pledge // this panics if the miner doesnt have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state11) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmou
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesn't have enough funds to cover their locked pledge // this panics if the miner doesnt have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state12) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmou
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesn't have enough funds to cover their locked pledge // this panics if the miner doesnt have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -1,591 +0,0 @@
package miner
import (
"bytes"
"errors"
"fmt"
"github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-bitfield"
rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
builtin13 "github.com/filecoin-project/go-state-types/builtin"
miner13 "github.com/filecoin-project/go-state-types/builtin/v13/miner"
adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt"
"github.com/filecoin-project/go-state-types/dline"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
)
var _ State = (*state13)(nil)
func load13(store adt.Store, root cid.Cid) (State, error) {
out := state13{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make13(store adt.Store) (State, error) {
out := state13{store: store}
out.State = miner13.State{}
return &out, nil
}
type state13 struct {
miner13.State
store adt.Store
}
type deadline13 struct {
miner13.Deadline
store adt.Store
}
type partition13 struct {
miner13.Partition
store adt.Store
}
func (s *state13) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) {
defer func() {
if r := recover(); r != nil {
err = xerrors.Errorf("failed to get available balance: %w", r)
available = abi.NewTokenAmount(0)
}
}()
// this panics if the miner doesn't have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal)
return available, err
}
func (s *state13) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
return s.CheckVestedFunds(s.store, epoch)
}
func (s *state13) LockedFunds() (LockedFunds, error) {
return LockedFunds{
VestingFunds: s.State.LockedFunds,
InitialPledgeRequirement: s.State.InitialPledge,
PreCommitDeposits: s.State.PreCommitDeposits,
}, nil
}
func (s *state13) FeeDebt() (abi.TokenAmount, error) {
return s.State.FeeDebt, nil
}
func (s *state13) InitialPledge() (abi.TokenAmount, error) {
return s.State.InitialPledge, nil
}
func (s *state13) PreCommitDeposits() (abi.TokenAmount, error) {
return s.State.PreCommitDeposits, nil
}
// Returns nil, nil if sector is not found
func (s *state13) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
info, ok, err := s.State.GetSector(s.store, num)
if !ok || err != nil {
return nil, err
}
ret := fromV13SectorOnChainInfo(*info)
return &ret, nil
}
func (s *state13) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
dlIdx, partIdx, err := s.State.FindSector(s.store, num)
if err != nil {
return nil, err
}
return &SectorLocation{
Deadline: dlIdx,
Partition: partIdx,
}, nil
}
func (s *state13) NumLiveSectors() (uint64, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return 0, err
}
var total uint64
if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner13.Deadline) error {
total += dl.LiveSectors
return nil
}); err != nil {
return 0, err
}
return total, nil
}
// GetSectorExpiration returns the effective expiration of the given sector.
//
// If the sector does not expire early, the Early expiration field is 0.
func (s *state13) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return nil, err
}
// NOTE: this can be optimized significantly.
// 1. If the sector is non-faulty, it will expire on-time (can be
// learned from the sector info).
// 2. If it's faulty, it will expire early within the first 42 entries
// of the expiration queue.
stopErr := errors.New("stop")
out := SectorExpiration{}
err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner13.Deadline) error {
partitions, err := dl.PartitionsArray(s.store)
if err != nil {
return err
}
quant := s.State.QuantSpecForDeadline(dlIdx)
var part miner13.Partition
return partitions.ForEach(&part, func(partIdx int64) error {
if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
return err
} else if !found {
return nil
}
if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
return err
} else if found {
// already terminated
return stopErr
}
q, err := miner13.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner13.PartitionExpirationAmtBitwidth)
if err != nil {
return err
}
var exp miner13.ExpirationSet
return q.ForEach(&exp, func(epoch int64) error {
if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
return err
} else if early {
out.Early = abi.ChainEpoch(epoch)
return nil
}
if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
return err
} else if onTime {
out.OnTime = abi.ChainEpoch(epoch)
return stopErr
}
return nil
})
})
})
if err == stopErr {
err = nil
}
if err != nil {
return nil, err
}
if out.Early == 0 && out.OnTime == 0 {
return nil, xerrors.Errorf("failed to find sector %d", num)
}
return &out, nil
}
func (s *state13) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
info, ok, err := s.State.GetPrecommittedSector(s.store, num)
if !ok || err != nil {
return nil, err
}
ret := fromV13SectorPreCommitOnChainInfo(*info)
return &ret, nil
}
func (s *state13) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
precommitted, err := adt13.AsMap(s.store, s.State.PreCommittedSectors, builtin13.DefaultHamtBitwidth)
if err != nil {
return err
}
var info miner13.SectorPreCommitOnChainInfo
if err := precommitted.ForEach(&info, func(_ string) error {
return cb(fromV13SectorPreCommitOnChainInfo(info))
}); err != nil {
return err
}
return nil
}
func (s *state13) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner13.LoadSectors(s.store, s.State.Sectors)
if err != nil {
return nil, err
}
// If no sector numbers are specified, load all.
if snos == nil {
infos := make([]*SectorOnChainInfo, 0, sectors.Length())
var info13 miner13.SectorOnChainInfo
if err := sectors.ForEach(&info13, func(_ int64) error {
info := fromV13SectorOnChainInfo(info13)
infos = append(infos, &info)
return nil
}); err != nil {
return nil, err
}
return infos, nil
}
// Otherwise, load selected.
infos13, err := sectors.Load(*snos)
if err != nil {
return nil, err
}
infos := make([]*SectorOnChainInfo, len(infos13))
for i, info13 := range infos13 {
info := fromV13SectorOnChainInfo(*info13)
infos[i] = &info
}
return infos, nil
}
func (s *state13) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
return allocatedSectors, err
}
func (s *state13) IsAllocated(num abi.SectorNumber) (bool, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return false, err
}
return allocatedSectors.IsSet(uint64(num))
}
func (s *state13) GetProvingPeriodStart() (abi.ChainEpoch, error) {
return s.State.ProvingPeriodStart, nil
}
func (s *state13) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return nil, err
}
allocatedRuns, err := allocatedSectors.RunIterator()
if err != nil {
return nil, err
}
unallocatedRuns, err := rle.Subtract(
&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
allocatedRuns,
)
if err != nil {
return nil, err
}
iter, err := rle.BitsFromRuns(unallocatedRuns)
if err != nil {
return nil, err
}
sectors := make([]abi.SectorNumber, 0, count)
for iter.HasNext() && len(sectors) < count {
nextNo, err := iter.Next()
if err != nil {
return nil, err
}
sectors = append(sectors, abi.SectorNumber(nextNo))
}
return sectors, nil
}
func (s *state13) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
return nil, err
}
return &allocatedSectors, nil
}
func (s *state13) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return nil, err
}
dl, err := dls.LoadDeadline(s.store, idx)
if err != nil {
return nil, err
}
return &deadline13{*dl, s.store}, nil
}
func (s *state13) ForEachDeadline(cb func(uint64, Deadline) error) error {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return err
}
return dls.ForEach(s.store, func(i uint64, dl *miner13.Deadline) error {
return cb(i, &deadline13{*dl, s.store})
})
}
func (s *state13) NumDeadlines() (uint64, error) {
return miner13.WPoStPeriodDeadlines, nil
}
func (s *state13) DeadlinesChanged(other State) (bool, error) {
other13, ok := other.(*state13)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !s.State.Deadlines.Equals(other13.Deadlines), nil
}
func (s *state13) MinerInfoChanged(other State) (bool, error) {
other0, ok := other.(*state13)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !s.State.Info.Equals(other0.State.Info), nil
}
func (s *state13) Info() (MinerInfo, error) {
info, err := s.State.GetInfo(s.store)
if err != nil {
return MinerInfo{}, err
}
mi := MinerInfo{
Owner: info.Owner,
Worker: info.Worker,
ControlAddresses: info.ControlAddresses,
PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey),
PeerId: info.PeerId,
Multiaddrs: info.Multiaddrs,
WindowPoStProofType: info.WindowPoStProofType,
SectorSize: info.SectorSize,
WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
ConsensusFaultElapsed: info.ConsensusFaultElapsed,
Beneficiary: info.Beneficiary,
BeneficiaryTerm: BeneficiaryTerm(info.BeneficiaryTerm),
PendingBeneficiaryTerm: (*PendingBeneficiaryChange)(info.PendingBeneficiaryTerm),
}
return mi, nil
}
func (s *state13) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
return s.State.RecordedDeadlineInfo(epoch), nil
}
func (s *state13) DeadlineCronActive() (bool, error) {
return s.State.DeadlineCronActive, nil
}
func (s *state13) sectors() (adt.Array, error) {
return adt13.AsArray(s.store, s.Sectors, miner13.SectorsAmtBitwidth)
}
func (s *state13) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
var si miner13.SectorOnChainInfo
err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
if err != nil {
return SectorOnChainInfo{}, err
}
return fromV13SectorOnChainInfo(si), nil
}
func (s *state13) precommits() (adt.Map, error) {
return adt13.AsMap(s.store, s.PreCommittedSectors, builtin13.DefaultHamtBitwidth)
}
func (s *state13) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
var sp miner13.SectorPreCommitOnChainInfo
err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
if err != nil {
return SectorPreCommitOnChainInfo{}, err
}
return fromV13SectorPreCommitOnChainInfo(sp), nil
}
func (s *state13) EraseAllUnproven() error {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return err
}
err = dls.ForEach(s.store, func(dindx uint64, dl *miner13.Deadline) error {
ps, err := dl.PartitionsArray(s.store)
if err != nil {
return err
}
var part miner13.Partition
err = ps.ForEach(&part, func(pindx int64) error {
_ = part.ActivateUnproven()
err = ps.Set(uint64(pindx), &part)
return nil
})
if err != nil {
return err
}
dl.Partitions, err = ps.Root()
if err != nil {
return err
}
return dls.UpdateDeadline(s.store, dindx, dl)
})
if err != nil {
return err
}
return s.State.SaveDeadlines(s.store, dls)
}
func (d *deadline13) LoadPartition(idx uint64) (Partition, error) {
p, err := d.Deadline.LoadPartition(d.store, idx)
if err != nil {
return nil, err
}
return &partition13{*p, d.store}, nil
}
func (d *deadline13) ForEachPartition(cb func(uint64, Partition) error) error {
ps, err := d.Deadline.PartitionsArray(d.store)
if err != nil {
return err
}
var part miner13.Partition
return ps.ForEach(&part, func(i int64) error {
return cb(uint64(i), &partition13{part, d.store})
})
}
func (d *deadline13) PartitionsChanged(other Deadline) (bool, error) {
other13, ok := other.(*deadline13)
if !ok {
// treat an upgrade as a change, always
return true, nil
}
return !d.Deadline.Partitions.Equals(other13.Deadline.Partitions), nil
}
func (d *deadline13) PartitionsPoSted() (bitfield.BitField, error) {
return d.Deadline.PartitionsPoSted, nil
}
func (d *deadline13) DisputableProofCount() (uint64, error) {
ops, err := d.OptimisticProofsSnapshotArray(d.store)
if err != nil {
return 0, err
}
return ops.Length(), nil
}
func (p *partition13) AllSectors() (bitfield.BitField, error) {
return p.Partition.Sectors, nil
}
func (p *partition13) FaultySectors() (bitfield.BitField, error) {
return p.Partition.Faults, nil
}
func (p *partition13) RecoveringSectors() (bitfield.BitField, error) {
return p.Partition.Recoveries, nil
}
func (p *partition13) UnprovenSectors() (bitfield.BitField, error) {
return p.Partition.Unproven, nil
}
func fromV13SectorOnChainInfo(v13 miner13.SectorOnChainInfo) SectorOnChainInfo {
info := SectorOnChainInfo{
SectorNumber: v13.SectorNumber,
SealProof: v13.SealProof,
SealedCID: v13.SealedCID,
DealIDs: v13.DealIDs,
Activation: v13.Activation,
Expiration: v13.Expiration,
DealWeight: v13.DealWeight,
VerifiedDealWeight: v13.VerifiedDealWeight,
InitialPledge: v13.InitialPledge,
ExpectedDayReward: v13.ExpectedDayReward,
ExpectedStoragePledge: v13.ExpectedStoragePledge,
SectorKeyCID: v13.SectorKeyCID,
}
return info
}
func fromV13SectorPreCommitOnChainInfo(v13 miner13.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
ret := SectorPreCommitOnChainInfo{
Info: SectorPreCommitInfo{
SealProof: v13.Info.SealProof,
SectorNumber: v13.Info.SectorNumber,
SealedCID: v13.Info.SealedCID,
SealRandEpoch: v13.Info.SealRandEpoch,
DealIDs: v13.Info.DealIDs,
Expiration: v13.Info.Expiration,
UnsealedCid: nil,
},
PreCommitDeposit: v13.PreCommitDeposit,
PreCommitEpoch: v13.PreCommitEpoch,
}
ret.Info.UnsealedCid = v13.Info.UnsealedCid
return ret
}
func (s *state13) GetState() interface{} {
return &s.State
}
func (s *state13) ActorKey() string {
return manifest.MinerKey
}
func (s *state13) ActorVersion() actorstypes.Version {
return actorstypes.Version13
}
func (s *state13) Code() cid.Cid {
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
if !ok {
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
}
return code
}

View File

@ -61,7 +61,7 @@ func (s *state2) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesn't have enough funds to cover their locked pledge // this panics if the miner doesnt have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state3) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesn't have enough funds to cover their locked pledge // this panics if the miner doesnt have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state4) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesn't have enough funds to cover their locked pledge // this panics if the miner doesnt have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state5) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesn't have enough funds to cover their locked pledge // this panics if the miner doesnt have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state6) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesn't have enough funds to cover their locked pledge // this panics if the miner doesnt have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state7) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesn't have enough funds to cover their locked pledge // this panics if the miner doesnt have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state8) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesn't have enough funds to cover their locked pledge // this panics if the miner doesnt have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state9) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesn't have enough funds to cover their locked pledge // this panics if the miner doesnt have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -8,7 +8,7 @@ import (
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
builtintypes "github.com/filecoin-project/go-state-types/builtin" builtintypes "github.com/filecoin-project/go-state-types/builtin"
multisig10 "github.com/filecoin-project/go-state-types/builtin/v10/multisig" multisig10 "github.com/filecoin-project/go-state-types/builtin/v10/multisig"
init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
@ -57,7 +57,7 @@ func (m message10) Create(
} }
// new actors are created by invoking 'exec' on the init actor with the constructor params // new actors are created by invoking 'exec' on the init actor with the constructor params
execParams := &init13.ExecParams{ execParams := &init12.ExecParams{
CodeCID: code, CodeCID: code,
ConstructorParams: enc, ConstructorParams: enc,
} }

View File

@ -8,7 +8,7 @@ import (
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
builtintypes "github.com/filecoin-project/go-state-types/builtin" builtintypes "github.com/filecoin-project/go-state-types/builtin"
multisig11 "github.com/filecoin-project/go-state-types/builtin/v11/multisig" multisig11 "github.com/filecoin-project/go-state-types/builtin/v11/multisig"
init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
@ -57,7 +57,7 @@ func (m message11) Create(
} }
// new actors are created by invoking 'exec' on the init actor with the constructor params // new actors are created by invoking 'exec' on the init actor with the constructor params
execParams := &init13.ExecParams{ execParams := &init12.ExecParams{
CodeCID: code, CodeCID: code,
ConstructorParams: enc, ConstructorParams: enc,
} }

View File

@ -7,8 +7,8 @@ import (
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
builtintypes "github.com/filecoin-project/go-state-types/builtin" builtintypes "github.com/filecoin-project/go-state-types/builtin"
init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
multisig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig" multisig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig"
init13 "github.com/filecoin-project/go-state-types/builtin/v13/init"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
@ -57,7 +57,7 @@ func (m message12) Create(
} }
// new actors are created by invoking 'exec' on the init actor with the constructor params // new actors are created by invoking 'exec' on the init actor with the constructor params
execParams := &init13.ExecParams{ execParams := &init12.ExecParams{
CodeCID: code, CodeCID: code,
ConstructorParams: enc, ConstructorParams: enc,
} }

View File

@ -1,77 +0,0 @@
package multisig
import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
builtintypes "github.com/filecoin-project/go-state-types/builtin"
init13 "github.com/filecoin-project/go-state-types/builtin/v13/init"
multisig13 "github.com/filecoin-project/go-state-types/builtin/v13/multisig"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors"
init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init"
"github.com/filecoin-project/lotus/chain/types"
)
type message13 struct{ message0 }
func (m message13) Create(
signers []address.Address, threshold uint64,
unlockStart, unlockDuration abi.ChainEpoch,
initialAmount abi.TokenAmount,
) (*types.Message, error) {
lenAddrs := uint64(len(signers))
if lenAddrs < threshold {
return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
}
if threshold == 0 {
threshold = lenAddrs
}
if m.from == address.Undef {
return nil, xerrors.Errorf("must provide source address")
}
// Set up constructor parameters for multisig
msigParams := &multisig13.ConstructorParams{
Signers: signers,
NumApprovalsThreshold: threshold,
UnlockDuration: unlockDuration,
StartEpoch: unlockStart,
}
enc, actErr := actors.SerializeParams(msigParams)
if actErr != nil {
return nil, actErr
}
code, ok := actors.GetActorCodeID(actorstypes.Version13, manifest.MultisigKey)
if !ok {
return nil, xerrors.Errorf("failed to get multisig code ID")
}
// new actors are created by invoking 'exec' on the init actor with the constructor params
execParams := &init13.ExecParams{
CodeCID: code,
ConstructorParams: enc,
}
enc, actErr = actors.SerializeParams(execParams)
if actErr != nil {
return nil, actErr
}
return &types.Message{
To: init_.Address,
From: m.from,
Method: builtintypes.MethodsInit.Exec,
Params: enc,
Value: initialAmount,
}, nil
}
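
For context, the removed builder above is normally reached through the package-level multisig.Message(version, from) constructor rather than instantiated directly. A hedged usage sketch on this branch, which tops out at actors v12; the Message entry point and the ID addresses are assumptions for illustration and are not shown in this diff.

package main

import (
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	actorstypes "github.com/filecoin-project/go-state-types/actors"
	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
)

func main() {
	creator, _ := address.NewIDAddress(1000) // illustrative signer addresses
	second, _ := address.NewIDAddress(1001)

	// Build (but do not send) the actor-creation message; per the file above it is
	// addressed to the init actor and invokes its Exec method with ExecParams.
	msg, err := multisig.Message(actorstypes.Version12, creator).Create(
		[]address.Address{creator, second}, // signers
		2,                                  // approvals threshold
		abi.ChainEpoch(0),                  // unlock start
		abi.ChainEpoch(0),                  // unlock duration
		big.Zero(),                         // initial amount
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(msg.To, msg.Method, msg.Value)
}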

Some files were not shown because too many files have changed in this diff.