Merge branch 'master' into feat/sturdypost

Andrew Jackson (Ajax) 2023-11-06 16:10:57 -06:00
commit 848c20d15f
200 changed files with 5609 additions and 1219 deletions


@@ -933,6 +933,8 @@ workflows:
 - build
 suite: itest-sector_pledge
 target: "./itests/sector_pledge_test.go"
+get-params: true
 - test:
 name: test-itest-sector_terminate
 requires:
@@ -1016,6 +1018,7 @@ workflows:
 suite: utest-unit-cli
 target: "./cli/... ./cmd/... ./api/..."
 get-params: true
+executor: golang-2xl
 - test:
 name: test-unit-node
 requires:
@@ -1023,6 +1026,7 @@ workflows:
 suite: utest-unit-node
 target: "./node/..."
 - test:
 name: test-unit-rest
 requires:
@@ -1030,6 +1034,7 @@ workflows:
 suite: utest-unit-rest
 target: "./blockstore/... ./build/... ./chain/... ./conformance/... ./gateway/... ./journal/... ./lib/... ./markets/... ./paychmgr/... ./provider/... ./tools/..."
+executor: golang-2xl
 - test:
 name: test-unit-storage
 requires:
@@ -1037,6 +1042,7 @@ workflows:
 suite: utest-unit-storage
 target: "./storage/... ./extern/..."
 - test:
 go-test-flags: "-run=TestMulticoreSDR"
 requires:


@@ -554,7 +554,7 @@ workflows:
 [[- if or (eq $name "worker") (eq $name "deals_concurrent") (eq $name "wdpost_worker_config")]]
 resource_class: 2xlarge
 [[- end]]
-[[- if (eq $name "wdpost")]]
+[[- if or (eq $name "wdpost") (eq $name "sector_pledge")]]
 get-params: true
 [[end]]
 [[- end ]][[- end]]
@@ -567,6 +567,8 @@ workflows:
 suite: utest-[[ $suite ]]
 target: "[[ $pkgs ]]"
 [[if eq $suite "unit-cli"]]get-params: true[[end]]
+[[if eq $suite "unit-cli"]]executor: golang-2xl[[end]]
+[[- if eq $suite "unit-rest"]]executor: golang-2xl[[end]]
 [[- end]]
 - test:
 go-test-flags: "-run=TestMulticoreSDR"

.gitignore

@@ -42,6 +42,7 @@ build/paramfetch.sh
 bin/ipget
 bin/tmp/*
 .idea
+.vscode
 scratchpad
 build/builtin-actors/v*


@@ -1,6 +1,8 @@
 # Lotus changelog
 # UNRELEASED
+- chore: Auto remove local chain data when importing chain file or snapshot ([filecoin-project/lotus#11277](https://github.com/filecoin-project/lotus/pull/11277))
+- feat: metric: export Mpool message count ([filecoin-project/lotus#11361](https://github.com/filecoin-project/lotus/pull/11361))
 ## New features
 - feat: Added new tracing API (**HIGHLY EXPERIMENTAL**) supporting two RPC methods: `trace_block` and `trace_replayBlockTransactions` ([filecoin-project/lotus#11100](https://github.com/filecoin-project/lotus/pull/11100))


@@ -73,7 +73,7 @@ All releases under an odd minor version number indicate **feature releases**. Th
 Feature releases include new development and bug fixes. They are not mandatory, but still highly recommended, **as they may contain critical security fixes**. Note that some of these releases may be very small patch releases that include critical hotfixes. There is no way to distinguish between a bug fix release and a feature release on the "feature" version. Both cases will use the "patch" version number.
-We aim to ship a new feature release of the Lotus software from our development (master) branch every 3 weeks, so users can expect a regular cadence of Lotus feature releases. Note that mandatory releases for network upgrades may disrupt this schedule. For more, see the Release Cycle section (TODO: Link).
+We aim to ship a new feature release of the Lotus software from our development (master) branch every 3 weeks, so users can expect a regular cadence of Lotus feature releases. Note that mandatory releases for network upgrades may disrupt this schedule. For more, see the [Release Cycle section](#release-cycle).
 ### Examples Scenarios


@@ -71,10 +71,10 @@ For other distributions you can find the required dependencies [here.](https://l
 #### Go
-To build Lotus, you need a working installation of [Go 1.19.12 or higher](https://golang.org/dl/):
+To build Lotus, you need a working installation of [Go 1.20.10 or higher](https://golang.org/dl/):
 ```bash
-wget -c https://golang.org/dl/go1.19.12.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
+wget -c https://golang.org/dl/go1.20.10.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
 ```
 **TIP:**
@@ -133,6 +133,8 @@ Note: The default branch `master` is the dev branch where the latest new feature
 6. You should now have Lotus installed. You can now [start the Lotus daemon and sync the chain](https://lotus.filecoin.io/lotus/install/linux/#start-the-lotus-daemon-and-sync-the-chain).
+7. (Optional) Follow the [Setting Up Prometheus and Grafana](https://github.com/filecoin-project/lotus/tree/master/metrics/README.md) guide for detailed instructions on setting up a working monitoring system running against a local running lotus node.
 ## License
 Dual-licensed under [MIT](https://github.com/filecoin-project/lotus/blob/master/LICENSE-MIT) + [Apache 2.0](https://github.com/filecoin-project/lotus/blob/master/LICENSE-APACHE)


@@ -21,7 +21,6 @@ import (
 "github.com/filecoin-project/go-state-types/big"
 "github.com/filecoin-project/go-state-types/builtin/v8/paych"
 "github.com/filecoin-project/go-state-types/builtin/v9/market"
-"github.com/filecoin-project/go-state-types/builtin/v9/miner"
 verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
 "github.com/filecoin-project/go-state-types/crypto"
 "github.com/filecoin-project/go-state-types/dline"
@@ -29,7 +28,7 @@ import (
 apitypes "github.com/filecoin-project/lotus/api/types"
 "github.com/filecoin-project/lotus/chain/actors/builtin"
-lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 "github.com/filecoin-project/lotus/chain/actors/builtin/power"
 "github.com/filecoin-project/lotus/chain/types"
 "github.com/filecoin-project/lotus/chain/types/ethtypes"
@@ -499,9 +498,9 @@ type FullNode interface {
 // expiration epoch
 StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) //perm:read
 // StateSectorExpiration returns epoch at which given sector will expire
-StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*lminer.SectorExpiration, error) //perm:read
+StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) //perm:read
 // StateSectorPartition finds deadline/partition with the specified sector
-StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*lminer.SectorLocation, error) //perm:read
+StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) //perm:read
 // StateSearchMsg looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed
 //
 // NOTE: If a replacing message is found on chain, this method will return


@@ -9,11 +9,11 @@ import (
 "github.com/filecoin-project/go-address"
 "github.com/filecoin-project/go-jsonrpc"
 "github.com/filecoin-project/go-state-types/abi"
-"github.com/filecoin-project/go-state-types/builtin/v9/miner"
 verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
 "github.com/filecoin-project/go-state-types/dline"
 apitypes "github.com/filecoin-project/lotus/api/types"
+"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 "github.com/filecoin-project/lotus/chain/types"
 "github.com/filecoin-project/lotus/chain/types/ethtypes"
 "github.com/filecoin-project/lotus/node/modules/dtypes"


@@ -19,10 +19,10 @@ import (
 "github.com/filecoin-project/go-state-types/abi"
 "github.com/filecoin-project/go-state-types/big"
 "github.com/filecoin-project/go-state-types/builtin/v9/market"
-"github.com/filecoin-project/go-state-types/builtin/v9/miner"
 abinetwork "github.com/filecoin-project/go-state-types/network"
 builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin"
+"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 "github.com/filecoin-project/lotus/chain/types"
 "github.com/filecoin-project/lotus/storage/pipeline/sealiface"
 "github.com/filecoin-project/lotus/storage/sealer/fsutil"
@@ -461,10 +461,15 @@ type SectorOffset struct {
 // DealInfo is a tuple of deal identity and its schedule
 type PieceDealInfo struct {
+// "Old" builtin-market deal info
 PublishCid *cid.Cid
 DealID abi.DealID
 DealProposal *market.DealProposal
+// Common deal info
 DealSchedule DealSchedule
+// Best-effort deal asks
 KeepUnsealed bool
 }
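The new comments group PieceDealInfo into "old" builtin-market fields and fields common to all deals. As a rough, non-authoritative sketch of how a caller might populate the struct for a builtin-market deal (the helper name, package layout and values are invented for illustration; only the field names come from the diff above):

```go
package example

import (
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/builtin/v9/market"

	"github.com/filecoin-project/lotus/api"
)

// newBuiltinMarketPieceInfo is a hypothetical helper that fills in PieceDealInfo
// for a deal published through the builtin market actor.
func newBuiltinMarketPieceInfo(publishCid cid.Cid, dealID abi.DealID, prop *market.DealProposal) api.PieceDealInfo {
	return api.PieceDealInfo{
		// "Old" builtin-market deal info
		PublishCid:   &publishCid,
		DealID:       dealID,
		DealProposal: prop,
		// Common deal info: the window in which the piece must be live in a sector
		DealSchedule: api.DealSchedule{
			StartEpoch: prop.StartEpoch,
			EndEpoch:   prop.EndEpoch,
		},
		// Best-effort ask: keep an unsealed copy around for retrieval
		KeepUnsealed: true,
	}
}
```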


@@ -42,7 +42,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Channel"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("Channel")); err != nil {
+if _, err := cw.WriteString(string("Channel")); err != nil {
 return err
 }
@@ -58,7 +58,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Vouchers"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("Vouchers")); err != nil {
+if _, err := cw.WriteString(string("Vouchers")); err != nil {
 return err
 }
@@ -83,7 +83,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WaitSentinel"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("WaitSentinel")); err != nil {
+if _, err := cw.WriteString(string("WaitSentinel")); err != nil {
 return err
 }
@@ -163,13 +163,32 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) {
 }
 for i := 0; i < int(extra); i++ {
+{
+var maj byte
+var extra uint64
+var err error
+_ = maj
+_ = extra
+_ = err
-var v paych.SignedVoucher
-if err := v.UnmarshalCBOR(cr); err != nil {
-return err
+{
+b, err := cr.ReadByte()
+if err != nil {
+return err
+}
+if b != cbg.CborNull[0] {
+if err := cr.UnreadByte(); err != nil {
+return err
+}
+t.Vouchers[i] = new(paych.SignedVoucher)
+if err := t.Vouchers[i].UnmarshalCBOR(cr); err != nil {
+return xerrors.Errorf("unmarshaling t.Vouchers[i] pointer: %w", err)
+}
+}
+}
+}
 }
-t.Vouchers[i] = &v
 }
 // t.WaitSentinel (cid.Cid) (struct)
@@ -214,7 +233,7 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Size"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("Size")); err != nil {
+if _, err := cw.WriteString(string("Size")); err != nil {
 return err
 }
@@ -230,7 +249,7 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Offset"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("Offset")); err != nil {
+if _, err := cw.WriteString(string("Offset")); err != nil {
 return err
 }
@@ -246,7 +265,7 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorID"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("SectorID")); err != nil {
+if _, err := cw.WriteString(string("SectorID")); err != nil {
 return err
 }
@@ -369,7 +388,7 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Refs"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("Refs")); err != nil {
+if _, err := cw.WriteString(string("Refs")); err != nil {
 return err
 }
@@ -447,13 +466,22 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) (err error) {
 }
 for i := 0; i < int(extra); i++ {
+{
+var maj byte
+var extra uint64
+var err error
+_ = maj
+_ = extra
+_ = err
-var v SealedRef
-if err := v.UnmarshalCBOR(cr); err != nil {
-return err
+{
+if err := t.Refs[i].UnmarshalCBOR(cr); err != nil {
+return xerrors.Errorf("unmarshaling t.Refs[i]: %w", err)
+}
+}
+}
 }
-t.Refs[i] = v
 }
 default:
@@ -484,7 +512,7 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("Epoch")); err != nil {
+if _, err := cw.WriteString(string("Epoch")); err != nil {
 return err
 }
@@ -506,7 +534,7 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Value"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("Value")); err != nil {
+if _, err := cw.WriteString(string("Value")); err != nil {
 return err
 }
@@ -639,7 +667,7 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("Epoch")); err != nil {
+if _, err := cw.WriteString(string("Epoch")); err != nil {
 return err
 }
@@ -661,7 +689,7 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Value"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("Value")); err != nil {
+if _, err := cw.WriteString(string("Value")); err != nil {
 return err
 }
@@ -794,7 +822,7 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("DealID")); err != nil {
+if _, err := cw.WriteString(string("DealID")); err != nil {
 return err
 }
@@ -810,7 +838,7 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("PublishCid")); err != nil {
+if _, err := cw.WriteString(string("PublishCid")); err != nil {
 return err
 }
@@ -832,7 +860,7 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("DealProposal")); err != nil {
+if _, err := cw.WriteString(string("DealProposal")); err != nil {
 return err
 }
@@ -848,7 +876,7 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealSchedule"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("DealSchedule")); err != nil {
+if _, err := cw.WriteString(string("DealSchedule")); err != nil {
 return err
 }
@@ -864,7 +892,7 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("KeepUnsealed")); err != nil {
+if _, err := cw.WriteString(string("KeepUnsealed")); err != nil {
 return err
 }
@@ -1027,7 +1055,7 @@ func (t *SectorPiece) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Piece"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("Piece")); err != nil {
+if _, err := cw.WriteString(string("Piece")); err != nil {
 return err
 }
@@ -1043,7 +1071,7 @@ func (t *SectorPiece) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealInfo"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("DealInfo")); err != nil {
+if _, err := cw.WriteString(string("DealInfo")); err != nil {
 return err
 }
@@ -1150,7 +1178,7 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("EndEpoch"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("EndEpoch")); err != nil {
+if _, err := cw.WriteString(string("EndEpoch")); err != nil {
 return err
 }
@@ -1172,7 +1200,7 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string("StartEpoch")); err != nil {
+if _, err := cw.WriteString(string("StartEpoch")); err != nil {
 return err
 }


@@ -26,7 +26,6 @@ import (
 "github.com/filecoin-project/go-jsonrpc/auth"
 "github.com/filecoin-project/go-state-types/abi"
 "github.com/filecoin-project/go-state-types/builtin/v8/paych"
-"github.com/filecoin-project/go-state-types/builtin/v9/miner"
 verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
 "github.com/filecoin-project/go-state-types/crypto"
 "github.com/filecoin-project/go-state-types/dline"
@@ -35,7 +34,7 @@ import (
 apitypes "github.com/filecoin-project/lotus/api/types"
 builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin"
-lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 "github.com/filecoin-project/lotus/chain/types"
 "github.com/filecoin-project/lotus/chain/types/ethtypes"
 "github.com/filecoin-project/lotus/journal/alerting"
@@ -564,11 +563,11 @@ type FullNodeMethods struct {
 StateSearchMsg func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `perm:"read"`
-StateSectorExpiration func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorExpiration, error) `perm:"read"`
+StateSectorExpiration func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) `perm:"read"`
 StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"`
-StateSectorPartition func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorLocation, error) `perm:"read"`
+StateSectorPartition func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) `perm:"read"`
 StateSectorPreCommitInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error) `perm:"read"`
@@ -3829,14 +3828,14 @@ func (s *FullNodeStub) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2
 return nil, ErrNotSupported
 }
-func (s *FullNodeStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorExpiration, error) {
+func (s *FullNodeStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) {
 if s.Internal.StateSectorExpiration == nil {
 return nil, ErrNotSupported
 }
 return s.Internal.StateSectorExpiration(p0, p1, p2, p3)
 }
-func (s *FullNodeStub) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorExpiration, error) {
+func (s *FullNodeStub) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) {
 return nil, ErrNotSupported
 }
@@ -3851,14 +3850,14 @@ func (s *FullNodeStub) StateSectorGetInfo(p0 context.Context, p1 address.Address
 return nil, ErrNotSupported
 }
-func (s *FullNodeStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorLocation, error) {
+func (s *FullNodeStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) {
 if s.Internal.StateSectorPartition == nil {
 return nil, ErrNotSupported
 }
 return s.Internal.StateSectorPartition(p0, p1, p2, p3)
 }
-func (s *FullNodeStub) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorLocation, error) {
+func (s *FullNodeStub) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) {
 return nil, ErrNotSupported
 }


@@ -19,8 +19,8 @@ import (
 datatransfer "github.com/filecoin-project/go-data-transfer/v2"
 "github.com/filecoin-project/go-fil-markets/retrievalmarket"
 "github.com/filecoin-project/go-state-types/abi"
-"github.com/filecoin-project/go-state-types/builtin/v9/miner"
+"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 "github.com/filecoin-project/lotus/chain/types"
 "github.com/filecoin-project/lotus/node/modules/dtypes"
 )
@ -56,9 +56,17 @@ type PubsubScore struct {
 Score *pubsub.PeerScoreSnapshot
 }
+// MessageSendSpec contains optional fields which modify message sending behavior
 type MessageSendSpec struct {
+// MaxFee specifies a cap on network fees related to this message
 MaxFee abi.TokenAmount
+// MsgUuid specifies a unique message identifier which can be used on node (or node cluster)
+// level to prevent double-sends of messages even when nonce generation is not handled by sender
 MsgUuid uuid.UUID
+// MaximizeFeeCap makes message FeeCap be based entirely on MaxFee
+MaximizeFeeCap bool
 }
 type MpoolMessageWhole struct {
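MaximizeFeeCap is new here; per its comment it makes the message FeeCap be derived entirely from MaxFee. A minimal, hypothetical sketch of a caller setting it when pushing a message (the helper and the fee handling are invented; MessageSendSpec and MpoolPushMessage are the Lotus API pieces shown above):

```go
package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// pushWithCappedFee is a hypothetical helper: it caps total fees at maxFee and,
// via MaximizeFeeCap, lets the node base the message FeeCap entirely on that cap.
func pushWithCappedFee(ctx context.Context, node api.FullNode, msg *types.Message, maxFee abi.TokenAmount) (*types.SignedMessage, error) {
	spec := &api.MessageSendSpec{
		MaxFee:         maxFee,
		MaximizeFeeCap: true,
	}
	return node.MpoolPushMessage(ctx, msg, spec)
}
```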


@@ -15,7 +15,6 @@ import (
 "github.com/filecoin-project/go-fil-markets/storagemarket"
 "github.com/filecoin-project/go-state-types/abi"
 "github.com/filecoin-project/go-state-types/builtin/v8/paych"
-"github.com/filecoin-project/go-state-types/builtin/v9/miner"
 verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
 "github.com/filecoin-project/go-state-types/crypto"
 "github.com/filecoin-project/go-state-types/dline"
@@ -23,7 +22,7 @@ import (
 "github.com/filecoin-project/lotus/api"
 apitypes "github.com/filecoin-project/lotus/api/types"
-lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 "github.com/filecoin-project/lotus/chain/types"
 marketevents "github.com/filecoin-project/lotus/markets/loggers"
 "github.com/filecoin-project/lotus/node/modules/dtypes"
@@ -450,9 +449,9 @@ type FullNode interface {
 // expiration epoch
 StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) //perm:read
 // StateSectorExpiration returns epoch at which given sector will expire
-StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*lminer.SectorExpiration, error) //perm:read
+StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) //perm:read
 // StateSectorPartition finds deadline/partition with the specified sector
-StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*lminer.SectorLocation, error) //perm:read
+StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) //perm:read
 // StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
 //
 // NOTE: If a replacing message is found on chain, this method will return


@@ -8,12 +8,12 @@ import (
 "github.com/filecoin-project/go-address"
 "github.com/filecoin-project/go-state-types/abi"
-"github.com/filecoin-project/go-state-types/builtin/v9/miner"
 verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
 "github.com/filecoin-project/go-state-types/dline"
 abinetwork "github.com/filecoin-project/go-state-types/network"
 "github.com/filecoin-project/lotus/api"
+"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 "github.com/filecoin-project/lotus/chain/types"
 "github.com/filecoin-project/lotus/node/modules/dtypes"
 )


@@ -17,7 +17,6 @@ import (
 "github.com/filecoin-project/go-fil-markets/storagemarket"
 "github.com/filecoin-project/go-state-types/abi"
 "github.com/filecoin-project/go-state-types/builtin/v8/paych"
-"github.com/filecoin-project/go-state-types/builtin/v9/miner"
 verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
 "github.com/filecoin-project/go-state-types/crypto"
 "github.com/filecoin-project/go-state-types/dline"
@@ -25,7 +24,7 @@ import (
 "github.com/filecoin-project/lotus/api"
 apitypes "github.com/filecoin-project/lotus/api/types"
-lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 "github.com/filecoin-project/lotus/chain/types"
 marketevents "github.com/filecoin-project/lotus/markets/loggers"
 "github.com/filecoin-project/lotus/node/modules/dtypes"
@@ -355,11 +354,11 @@ type FullNodeMethods struct {
 StateSearchMsgLimited func(p0 context.Context, p1 cid.Cid, p2 abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"`
-StateSectorExpiration func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorExpiration, error) `perm:"read"`
+StateSectorExpiration func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) `perm:"read"`
 StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"`
-StateSectorPartition func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorLocation, error) `perm:"read"`
+StateSectorPartition func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) `perm:"read"`
 StateSectorPreCommitInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"`
@@ -2245,14 +2244,14 @@ func (s *FullNodeStub) StateSearchMsgLimited(p0 context.Context, p1 cid.Cid, p2
 return nil, ErrNotSupported
 }
-func (s *FullNodeStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorExpiration, error) {
+func (s *FullNodeStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) {
 if s.Internal.StateSectorExpiration == nil {
 return nil, ErrNotSupported
 }
 return s.Internal.StateSectorExpiration(p0, p1, p2, p3)
 }
-func (s *FullNodeStub) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorExpiration, error) {
+func (s *FullNodeStub) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) {
 return nil, ErrNotSupported
 }
@@ -2267,14 +2266,14 @@ func (s *FullNodeStub) StateSectorGetInfo(p0 context.Context, p1 address.Address
 return nil, ErrNotSupported
 }
-func (s *FullNodeStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorLocation, error) {
+func (s *FullNodeStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) {
 if s.Internal.StateSectorPartition == nil {
 return nil, ErrNotSupported
 }
 return s.Internal.StateSectorPartition(p0, p1, p2, p3)
 }
-func (s *FullNodeStub) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorLocation, error) {
+func (s *FullNodeStub) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) {
 return nil, ErrNotSupported
 }


@@ -12,11 +12,11 @@ import (
 "github.com/filecoin-project/go-fil-markets/storagemarket"
 "github.com/filecoin-project/go-state-types/abi"
 "github.com/filecoin-project/go-state-types/big"
-"github.com/filecoin-project/go-state-types/builtin/v9/miner"
 "github.com/filecoin-project/go-state-types/crypto"
 "github.com/filecoin-project/lotus/api"
 "github.com/filecoin-project/lotus/api/v1api"
+"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 "github.com/filecoin-project/lotus/chain/types"
 marketevents "github.com/filecoin-project/lotus/markets/loggers"
 )


@@ -52,9 +52,11 @@ func (t *NetRpcReq) MarshalCBOR(w io.Writer) error {
 return err
 }
 for _, v := range t.Cid {
-if err := cbg.WriteCid(w, v); err != nil {
-return xerrors.Errorf("failed writing cid field t.Cid: %w", err)
+if err := cbg.WriteCid(cw, v); err != nil {
+return xerrors.Errorf("failed to write cid field v: %w", err)
 }
 }
 // t.Data ([][]uint8) (slice)
@@ -151,12 +153,25 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) {
 }
 for i := 0; i < int(extra); i++ {
+{
+var maj byte
+var extra uint64
+var err error
+_ = maj
+_ = extra
+_ = err
-c, err := cbg.ReadCid(cr)
-if err != nil {
-return xerrors.Errorf("reading cid field t.Cid failed: %w", err)
+{
+c, err := cbg.ReadCid(cr)
+if err != nil {
+return xerrors.Errorf("failed to read cid field t.Cid[i]: %w", err)
+}
+t.Cid[i] = c
+}
+}
 }
-t.Cid[i] = c
 }
 // t.Data ([][]uint8) (slice)
@@ -183,6 +198,9 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) {
 var maj byte
 var extra uint64
 var err error
+_ = maj
+_ = extra
+_ = err
 maj, extra, err = cr.ReadHeader()
 if err != nil {
@@ -350,7 +368,7 @@ func (t *NetRpcErr) MarshalCBOR(w io.Writer) error {
 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Msg))); err != nil {
 return err
 }
-if _, err := io.WriteString(w, string(t.Msg)); err != nil {
+if _, err := cw.WriteString(string(t.Msg)); err != nil {
 return err
 }


@@ -183,3 +183,17 @@ func (b *idstore) Close() error {
 func (b *idstore) Flush(ctx context.Context) error {
 return b.bs.Flush(ctx)
 }
+func (b *idstore) CollectGarbage(ctx context.Context, options ...BlockstoreGCOption) error {
+if bs, ok := b.bs.(BlockstoreGC); ok {
+return bs.CollectGarbage(ctx, options...)
+}
+return xerrors.Errorf("not supported")
+}
+func (b *idstore) GCOnce(ctx context.Context, options ...BlockstoreGCOption) error {
+if bs, ok := b.bs.(BlockstoreGCOnce); ok {
+return bs.GCOnce(ctx, options...)
+}
+return xerrors.Errorf("not supported")
+}
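CollectGarbage and GCOnce are forwarded only when the wrapped store implements the optional GC interfaces. A small, hypothetical caller-side sketch of the same type-assertion pattern (the helper is invented; BlockstoreGC and CollectGarbage are the Lotus blockstore pieces used above):

```go
package example

import (
	"context"

	"github.com/filecoin-project/lotus/blockstore"
)

// maybeGC runs a garbage collection only if the underlying blockstore supports
// it, mirroring the optional-interface probe used by the idstore wrapper above.
func maybeGC(ctx context.Context, bs blockstore.Blockstore) error {
	gc, ok := bs.(blockstore.BlockstoreGC)
	if !ok {
		return nil // no GC support; treat as a no-op rather than an error
	}
	return gc.CollectGarbage(ctx)
}
```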


@@ -66,8 +66,9 @@ var (
 )
 const (
 batchSize = 16384
 cidKeySize = 128
+purgeWorkSliceDuration = time.Second
 )
 func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
@@ -1372,9 +1373,21 @@ func (s *SplitStore) purge(coldr *ColdSetReader, checkpoint *Checkpoint, markSet
 return err
 }
+now := time.Now()
 err := coldr.ForEach(func(c cid.Cid) error {
 batch = append(batch, c)
 if len(batch) == batchSize {
+// add some time slicing to the purge as this a very disk I/O heavy operation that
+// requires write access to txnLk that may starve other operations that require
+// access to the blockstore.
+elapsed := time.Since(now)
+if elapsed > purgeWorkSliceDuration {
+// work 1 slice, sleep 4 slices, or 20% utilization
+time.Sleep(4 * elapsed)
+now = time.Now()
+}
 return deleteBatch()
 }
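The comment spells out the throttling rule: whenever a slice of work exceeds purgeWorkSliceDuration, the loop sleeps four times as long, which works out to roughly 20% utilization. A self-contained sketch of that duty-cycle pattern in isolation (all names here are invented for illustration):

```go
package main

import (
	"fmt"
	"time"
)

// throttle20 runs work repeatedly at roughly 20% utilization: once the time
// spent since the last reset exceeds sliceDur, it sleeps 4x that long
// ("work 1 slice, sleep 4 slices") before continuing.
func throttle20(sliceDur time.Duration, iterations int, work func()) {
	start := time.Now()
	for i := 0; i < iterations; i++ {
		work()
		if elapsed := time.Since(start); elapsed > sliceDur {
			time.Sleep(4 * elapsed) // yield ~80% of wall-clock time
			start = time.Now()
		}
	}
}

func main() {
	throttle20(time.Second, 5, func() { time.Sleep(300 * time.Millisecond) })
	fmt.Println("done")
}
```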


@@ -1,2 +1,2 @@
-/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWD5mtdmjHQ1Puj9Md7SEfoa7kWMpwqUhAKsyYsBP56LQC
+/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWRaoPgwJuZdPSN4A2iTeh8xzkZGCEBxan9vMkidHisUgn
-/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWEoYPkm6o87ES6AppFY7d7WHJUQg7XVPRAyQZjEU31efQ
+/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWMjLCZeEf3VzSWvQYuhe9VzCcrN6RENX9FgmQqiJfQDWs


@@ -1,9 +1,6 @@
 /dns4/bootstrap-0.mainnet.filops.net/tcp/1347/p2p/12D3KooWCVe8MmsEMes2FzgTpt9fXtmCY7wrq91GRiaC8PHSCCBj
 /dns4/bootstrap-1.mainnet.filops.net/tcp/1347/p2p/12D3KooWCwevHg1yLCvktf2nvLu7L9894mcrJR4MsBCcm4syShVc
 /dns4/bootstrap-2.mainnet.filops.net/tcp/1347/p2p/12D3KooWEWVwHGn2yR36gKLozmb4YjDJGerotAPGxmdWZx2nxMC4
-/dns4/bootstrap-3.mainnet.filops.net/tcp/1347/p2p/12D3KooWKhgq8c7NQ9iGjbyK7v7phXvG6492HQfiDaGHLHLQjk7R
-/dns4/bootstrap-4.mainnet.filops.net/tcp/1347/p2p/12D3KooWL6PsFNPhYftrJzGgF5U18hFoaVhfGk7xwzD8yVrHJ3Uc
-/dns4/bootstrap-5.mainnet.filops.net/tcp/1347/p2p/12D3KooWLFynvDQiUpXoHroV1YxKHhPJgysQGH2k3ZGwtWzR4dFH
 /dns4/bootstrap-6.mainnet.filops.net/tcp/1347/p2p/12D3KooWP5MwCiqdMETF9ub1P3MbCvQCcfconnYHbWg6sUJcDRQQ
 /dns4/bootstrap-7.mainnet.filops.net/tcp/1347/p2p/12D3KooWRs3aY1p3juFjPy8gPN95PEQChm2QKGUCAdcDCC4EBMKf
 /dns4/bootstrap-8.mainnet.filops.net/tcp/1347/p2p/12D3KooWScFR7385LTyR4zU1bYdzSiiAb5rnNABfVahPvVSzyTkR
@@ -13,4 +10,5 @@
 /dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt
 /dns4/bootstrap-0.ipfsmain.cn/tcp/34721/p2p/12D3KooWQnwEGNqcM2nAcPtRR9rAX8Hrg4k9kJLCHoTR5chJfz6d
 /dns4/bootstrap-1.ipfsmain.cn/tcp/34723/p2p/12D3KooWMKxMkD5DMpSWsW7dBddKxKT7L2GgbNuckz9otxvkvByP
 /dns4/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWRzCVDwHUkgdK7eRgnoXbjDAELhxPErjHzbRLguSV1aRt
+/dns4/bootstrap-venus.mainnet.filincubator.com/tcp/8888/p2p/QmQu8C6deXwKvJP2D8B6QGyhngc3ZiDnFzEHBDx8yeBXST


@@ -42,6 +42,12 @@ func init() {
 if err := loadManifests(NetworkBundle); err != nil {
 panic(err)
 }
+// The following code cid existed temporarily on the calibnet testnet, as a "buggy" storage miner actor implementation.
+// We include it in our builtin bundle, but intentionally omit from metadata.
+if NetworkBundle == "calibrationnet" {
+actors.AddActorMeta("storageminer", cid.MustParse("bafk2bzacecnh2ouohmonvebq7uughh4h3ppmg4cjsk74dzxlbbtlcij4xbzxq"), actorstypes.Version12)
+}
 }
 // UseNetworkBundle switches to a different network bundle, by name.
@@ -183,6 +189,12 @@ func readEmbeddedBuiltinActorsMetadata(bundle string) ([]*BuiltinActorsMetadata,
 if err != nil {
 return nil, xerrors.Errorf("error loading builtin actors bundle: %w", err)
 }
+// The following manifest cid existed temporarily on the calibnet testnet
+// We include it in our builtin bundle, but intentionally omit from metadata
+if root == cid.MustParse("bafy2bzacedrunxfqta5skb7q7x32lnp4efz2oq7fn226ffm7fu5iqs62jkmvs") {
+continue
+}
 bundles = append(bundles, &BuiltinActorsMetadata{
 Network: name,
 Version: actorstypes.Version(version),
@@ -232,7 +244,7 @@ func readBundleManifest(r io.Reader) (cid.Cid, map[string]cid.Cid, error) {
 }
 // GetEmbeddedBuiltinActorsBundle returns the builtin-actors bundle for the given actors version.
-func GetEmbeddedBuiltinActorsBundle(version actorstypes.Version) ([]byte, bool) {
+func GetEmbeddedBuiltinActorsBundle(version actorstypes.Version, networkBundleName string) ([]byte, bool) {
 fi, err := embeddedBuiltinActorReleases.Open(fmt.Sprintf("actors/v%d.tar.zst", version))
 if err != nil {
 return nil, false
@@ -243,7 +255,7 @@ func GetEmbeddedBuiltinActorsBundle(version actorstypes.Version) ([]byte, bool)
 defer uncompressed.Close() //nolint
 tarReader := tar.NewReader(uncompressed)
-targetFileName := fmt.Sprintf("builtin-actors-%s.car", NetworkBundle)
+targetFileName := fmt.Sprintf("builtin-actors-%s.car", networkBundleName)
 for {
 header, err := tarReader.Next()
 switch err {
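GetEmbeddedBuiltinActorsBundle now takes the network bundle name as an explicit parameter instead of reading the package-level NetworkBundle. A hypothetical call site under that assumption (the wrapper function and error text are invented):

```go
package example

import (
	actorstypes "github.com/filecoin-project/go-state-types/actors"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/build"
)

// loadCalibnetBundle fetches the embedded actors v12 CAR for calibrationnet,
// naming the network bundle explicitly as the new signature requires.
func loadCalibnetBundle() ([]byte, error) {
	carBytes, ok := build.GetEmbeddedBuiltinActorsBundle(actorstypes.Version12, "calibrationnet")
	if !ok {
		return nil, xerrors.Errorf("no embedded builtin-actors bundle for actors v12")
	}
	return carBytes, nil
}
```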


@@ -97,25 +97,25 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
 }, {
 Network: "butterflynet",
 Version: 12,
-BundleGitTag: "v11.0.0",
+BundleGitTag: "v12.0.0-rc.2",
-ManifestCid: MustParseCid("bafy2bzaceaiy4dsxxus5xp5n5i4tjzkb7sc54mjz7qnk2efhgmsrobjesxnza"),
+ManifestCid: MustParseCid("bafy2bzaceabeegs5mhrxwqodyt7u2ulxr2jg6eh5g3545ganqzkncnxzserue"),
 Actors: map[string]cid.Cid{
-"account": MustParseCid("bafk2bzacecfdqb7p3jakhaa3cqnzpt7hxmhghrbxvafsylqno3febx55fnidw"),
+"account": MustParseCid("bafk2bzacebp7anjdtg2sohyt6lromx4xs7nujtwdfcsffnptphaayabx7ysxs"),
-"cron": MustParseCid("bafk2bzaceavmqu2qihgbe3xdaotgypuzvdpiifnm7ll6rolks2u4lac6voosk"),
+"cron": MustParseCid("bafk2bzacecu2y3awtemmglpkroiglulc2fj3gpdn6eazdqr6avcautiaighrg"),
-"datacap": MustParseCid("bafk2bzacealtvh65rzb34fmyzw4m2np2htnio4w3pn4alzqovwxkdbf23dvpo"),
+"datacap": MustParseCid("bafk2bzacebbh5aynu3v3fluqqrcdsphleodoig42xkid2ccwdnff3avhbdop4"),
-"eam": MustParseCid("bafk2bzacedko6hcjmwpuwgma5pb4gr2wgyvregk3nqqjxit7dv4es6vh5cjoc"),
+"eam": MustParseCid("bafk2bzacebzwt4v4hqoltiblhliwrnttxpr2dggbu3wsrvq4pwzisp7idu5w4"),
-"ethaccount": MustParseCid("bafk2bzacedhcei2xnr34poxr4xziypm2obqlibke4cs2cjfnr3sz6nf6h7fyy"),
+"ethaccount": MustParseCid("bafk2bzaceb5f6vgjkl7ic6ry5sjspqm2iij6qlcdovwi3haodb7wn37pgebii"),
-"evm": MustParseCid("bafk2bzacebn5lwxboiikhz67ajwa34v2lc4qevnhpwdnipbmrnutkvrrqkb46"),
+"evm": MustParseCid("bafk2bzacebygt6zh6p52rkg2ugehm4k5yuu6f56i2pu6ywrmjez4n4zsje4p4"),
-"init": MustParseCid("bafk2bzacea6vw4esh5tg7mprv5jkbx5xcyilcy4vvf64lss32mjyuvv2mh5ng"),
+"init": MustParseCid("bafk2bzaceagyf3pwsthod7klfi25ow2zf2i5isfrrgr5ua3lvkgfojalrdbhw"),
-"multisig": MustParseCid("bafk2bzacedq2afnwcfipay5twv5mgzjoio5bbjvyo4yqchdwqcr7wrareyx54"),
+"multisig": MustParseCid("bafk2bzacedgfo5mw2zqjwi37lah27sfxj4cw2abylgtxf3ucep4dyhgnppmqe"),
-"paymentchannel": MustParseCid("bafk2bzacebbsvr7i7mqmaadyjibe5wxnv7bwvvec2wlgknuwda6ep45amnd5w"),
+"paymentchannel": MustParseCid("bafk2bzacebm37tgu52cgzmiln6iip6etfmq73fd3qqz2j5gxlhtvachs7kw4c"),
 "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
-"reward": MustParseCid("bafk2bzaceafuh6idvaqqkj353vs4qdl42tcmvnymewu5zf4rq2nruxdyunses"),
+"reward": MustParseCid("bafk2bzacedebvitdsztwebi44t5es4ls3p3hor252igzawr3s6uznmbvzh2ou"),
-"storagemarket": MustParseCid("bafk2bzaceb7bx4honi3byjllpdk6fea32dpu3vqvil3okodybdk5m3erlnwjw"),
+"storagemarket": MustParseCid("bafk2bzaceb2tdeqtt2eqpzeb3gezuchb7g7uzbd52bgvcdt6bg3ckq7oisb74"),
-"storageminer": MustParseCid("bafk2bzacebxjhofdr3sb2uhy2ky2vcijh4nhmwkh5xijtbgk6dzkknji2kn7a"),
+"storageminer": MustParseCid("bafk2bzacea2jzzulmz6ktj6vndjheybz2io3vznnhsb32zberpaqeo7s7xa56"),
-"storagepower": MustParseCid("bafk2bzaceabskmmkas6njbowols7t4ib3bipa5abpomk3jtgfwojtzd7mjzfm"),
+"storagepower": MustParseCid("bafk2bzacedxvlj5xmhytdjrjqyonz37duvxb2ioyzk75c27yypkqalxuh3xh6"),
-"system": MustParseCid("bafk2bzacedtuh7cht3fud7fb4avl4g2zbz57lc4ohiaufpaex6dkmdokn5rgo"),
+"system": MustParseCid("bafk2bzacec3vwj2chzaram3iqupkbfiein5h2l5qiltlrngbju2vg5umelclm"),
-"verifiedregistry": MustParseCid("bafk2bzaceb37hxeuoo5rgf6ansrdl2ykm5v5zp6kireubn4orcopr67jbxv6k"),
+"verifiedregistry": MustParseCid("bafk2bzacedv2irkql7nil3w5v3ohqq3e54w62pxeoppjmaktzokolaaoh5ksu"),
 },
 }, {
 Network: "calibrationnet",
@@ -203,25 +203,25 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
 }, {
 Network: "calibrationnet",
 Version: 12,
-BundleGitTag: "v11.0.0",
+BundleGitTag: "v12.0.0-rc.2",
-ManifestCid: MustParseCid("bafy2bzacec5fl7crmxyw234qsmijvffhssgqwuphyaejulbryhel2pxxrxgey"),
+ManifestCid: MustParseCid("bafy2bzacebl4w5ptfvuw6746w7ev562idkbf5ppq72e6zub22435ws2rukzru"),
 Actors: map[string]cid.Cid{
-"account": MustParseCid("bafk2bzacecrjovyiuh4jryepy4pxddzqjyrg2hfinxzbv37bpzlci54r5mkr6"),
+"account": MustParseCid("bafk2bzacechwwxdqvggkdylm37zldjsra2ivkdzwp7fee56bzxbzs544wv6u6"),
-"cron": MustParseCid("bafk2bzacedy76woxmtalmsuaji4kog6wmq4h2kcgcyv5wpxbdz7f2ls2tjjmw"),
+"cron": MustParseCid("bafk2bzacec4gdxxkqwxqqodsv6ug5dmdbqdfqwyqfek3yhxc2wweh5psxaeq6"),
-"datacap": MustParseCid("bafk2bzacec2inqddxpfm3rufwqr752d2ok3ve4cxfhmloyosy6rj2krtkpwus"),
+"datacap": MustParseCid("bafk2bzacecq5ppfskxgv3iea3jarsix6jdduuhwsn4fbvngtbmzelzmlygorm"),
-"eam": MustParseCid("bafk2bzacea6sxno66egkqz5rqjq4e22obkeblxl7e3funjifljuinmrc2ztzg"),
+"eam": MustParseCid("bafk2bzacecb6cnwftvavpph4p34zs4psuy5xvbrhf7vszkva4npw6mw3c42xe"),
-"ethaccount": MustParseCid("bafk2bzacecdsvs7xm3ncm66lsjqh65uzhr3rmu3dlux7qzdgpg737r4kslhxm"),
+"ethaccount": MustParseCid("bafk2bzaceajmc3y3sedsqymfla3dzzqzmbu5kmr2iskm26ga2u34ll5fpztfw"),
-"evm": MustParseCid("bafk2bzaceaz3b66m2znt27clmbp2zi5jsobw6g2x6fiezynyijgtkehgqhq3a"),
+"evm": MustParseCid("bafk2bzaced4sozr7m6rzcgpobzeiupghthfw6afumysu3oz6bxxirv74uo3vw"),
-"init": MustParseCid("bafk2bzacecdrw7uedx456hnowtyyhm63mkekdlkh3vmlhvqlya6pn6pokiq5y"),
+"init": MustParseCid("bafk2bzaceaewh7b6zl2egclm7fqzx2lsqr57i75lb6cj43ndoa4mal3k5ld3m"),
-"multisig": MustParseCid("bafk2bzaceaxyxvmng5cel5huzy5nezscm34s7wuzn2fhndexurs3xjtp7xg5i"),
+"multisig": MustParseCid("bafk2bzacednkwcpw5yzxjceoaliajgupzj6iqxe7ks2ll3unspbprbo5f2now"),
-"paymentchannel": MustParseCid("bafk2bzacedrmyc4c6u6ipdo7hwaagx3urr47r4pw6lwv257wqbj6roumwfvky"),
+"paymentchannel": MustParseCid("bafk2bzacebaxhk4itfiuvbftg7kz5zxugqnvdgerobitjq4vl6q4orcwk6wqg"),
 "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
-"reward": MustParseCid("bafk2bzacecq3bhrkatwash5zhy2275ksaj3criqb6rox5e3hsyvz7mrl2jh3o"),
+"reward": MustParseCid("bafk2bzacedra77pcglf7vdca2itcaa4vd6xrxynxmgfgdjdxqxfwqyhtoxehy"),
-"storagemarket": MustParseCid("bafk2bzacedswrpkbh7jmttskllbblym7oj2ynxp7bxtj2fpbxsx55mraog6sc"),
+"storagemarket": MustParseCid("bafk2bzacea7g46y7xxu2zjq2h75x6mmx3utz2uxnlvnwi6tzpsvulna3bmiva"),
-"storageminer": MustParseCid("bafk2bzacecki6ckm7gf4uje3fxvw6x5f77ukaqctvcsfha6oaecvl67veh3sg"),
+"storageminer": MustParseCid("bafk2bzaced7emkbbnrewv5uvrokxpf5tlm4jslu2jsv77ofw2yqdglg657uie"),
-"storagepower": MustParseCid("bafk2bzacecjcvxwibkgpufeah33gfd2jzlqjx5rn2pguvvch2squon23u6kne"),
+"storagepower": MustParseCid("bafk2bzacedd3ka44k7d46ckbinjhv3diyuu2epgbyvhqqyjkc64qlrg3wlgzi"),
-"system": MustParseCid("bafk2bzaceavvlgqbcwhy3c24aa24z23wcbzggmb66gj7x5if7o3fbvddaocc4"),
+"system": MustParseCid("bafk2bzacecioupndtcnyw6iq2hbrxag3aufvczlv5nobnfbkbywqzcyfaa376"),
-"verifiedregistry": MustParseCid("bafk2bzacedmxdtnpy2mc63b6bi2h4vp4dfc6hxjckqnwaxyijgkpmangygcmk"),
+"verifiedregistry": MustParseCid("bafk2bzaceavldupmf7bimeeacs67z5xdfdlfca6p7sn6bev3mt5ggepfqvhqo"),
 },
 }, {
 Network: "caterpillarnet",
@@ -318,25 +318,25 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
 }, {
 Network: "caterpillarnet",
 Version: 12,
-BundleGitTag: "v11.0.0",
+BundleGitTag: "v12.0.0-rc.2",
-ManifestCid: MustParseCid("bafy2bzacebexc2jgzwr5ngn6jdnkwdqwwmcapajuypdgvopoe6bnvp4yxm4o2"),
+ManifestCid: MustParseCid("bafy2bzacechvmc3iy6qrsbb6xwjhjibqpznqnkajowjsdmrq42ie5hysqemqa"),
 Actors: map[string]cid.Cid{
-"account": MustParseCid("bafk2bzaceanjiq5m3feytue5m7hhxfkob2ofg2greoct5tr77reuhrjglo66g"),
+"account": MustParseCid("bafk2bzacecereuhejfvodut5357cai4lmhsyr7uenhcxvmw6jpmhe6auuly32"),
-"cron": MustParseCid("bafk2bzaceavgd5qj6n744tukhdrvxejygzs3jnlizmcvjsdnxkgiimrd5jrys"),
+"cron": MustParseCid("bafk2bzacebo2whgy6jla4jsf5j4ovlqm2e4eepedlpw5wadas33yxmunis4b4"),
-"datacap": MustParseCid("bafk2bzacedmdywxwrzop2gmf4ys5stydlmvbe35j3nyr2efmf273briksuvse"),
+"datacap": MustParseCid("bafk2bzacecjjncl7ftgj4mrzxxfxld74pt3pyfrxmcru7a5auab25b3aoixm6"),
-"eam": MustParseCid("bafk2bzacec7qo7s72li7tqysllstlrxxm2dhfqv2w32pytel2e775cki4ozqm"),
+"eam": MustParseCid("bafk2bzacebyvawfzoxy7k4yxrj5nd3amg4rcopmnslxdwpzumfhsz5ezk4sws"),
-"ethaccount": MustParseCid("bafk2bzaceaygtkliu26ubb7ivljrvaeesp5sbjlis5okzl35ishxioa2tlx4w"),
+"ethaccount": MustParseCid("bafk2bzaceaccs76uc6osvb2iy6w2pumqei3wdjtxq7rgtsotobncmqoi7kzcg"),
-"evm": MustParseCid("bafk2bzacebo7iqzy2ophz4f3civzwlltec7q5fut7kmtfckr6vy33r6ic5eqe"),
+"evm": MustParseCid("bafk2bzaceawxgjzjkhbqwj36wzxeqbtngdh6y2tp4wsi27k7tbg2ujhw5rsjg"),
-"init": MustParseCid("bafk2bzaceb7uzzlsquqwrqhb2vpbvk3jgr4wp5i3smu2splnag2v5sppdehns"),
+"init": MustParseCid("bafk2bzacedws5od7o6ktqyo2hudmipxuubsv2lwxz45xxjn2zguze72t6zoik"),
-"multisig": MustParseCid("bafk2bzacebwibfqrytobl4pjtny244zkmfoomazbap3r5gddjryckx5js4csi"),
+"multisig": MustParseCid("bafk2bzacecb4wk6n4lrmml3tssn6cszd4dc7ttux3kzjatrawhg4o6ovrng6w"),
-"paymentchannel": MustParseCid("bafk2bzacecuaa5esuxpouigxoamyl5gire2qqqhvyhewsig2x2j73f6ksh7go"),
+"paymentchannel": MustParseCid("bafk2bzacea3eb556mkjvosfbqfbyfg6dgu52rfnuctwzjy3b2bh2azredxzbo"),
 "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
-"reward": MustParseCid("bafk2bzaced4xxqhv63njf2ibvsqshlwikafctxev7aho5lgsfxyt2javjwvtw"),
+"reward": MustParseCid("bafk2bzaceb2x5zgkrho373l3ippp6krs7brugssg6hj2tib22xmqjzdm2o25a"),
-"storagemarket": MustParseCid("bafk2bzacedwtx3xokqmbgkgkoqkdt6lam4ymdjb3eznlbtec5wcrtx74l2bpc"),
+"storagemarket": MustParseCid("bafk2bzaced5j6drzmsebpxbf2dtptrc5tyidlbftdljqxavxqb57s2qpbvdek"),
-"storageminer": MustParseCid("bafk2bzacebbbe4sdo3xxkez7x7lkl6j46w34vx7eg7xswmdzhp7moa44p3wjg"),
+"storageminer": MustParseCid("bafk2bzacec7al5izu3ynnb7wg6fxxck3hebtkvlgynufjwcio57jd3n4ke2ue"),
-"storagepower": MustParseCid("bafk2bzacedfgz6n24tjsor4pcayomim2f5f3a3fgyatmjgwxxeejna7okndda"),
+"storagepower": MustParseCid("bafk2bzaceagp6ilkltsltwii66nz6a4zen4qtfk7rdkvdv3gzq7fbv4ivox3u"),
-"system": MustParseCid("bafk2bzacebxfzeom3d7ahcz2n2nlwp7ncv767bdbbrisugks4l6v7lcu2tmyg"),
+"system": MustParseCid("bafk2bzacedye5j5uxox7knb6zlnhseaadztyav76mjbyk5qslhhbpiy5cdtt2"),
-"verifiedregistry": MustParseCid("bafk2bzacedaws3or3twy45ltcxucgvqijsje4x675ph6vup2w35smlfneamno"),
+"verifiedregistry": MustParseCid("bafk2bzacecduww5pirr7dvaijjijw4gf6ygf7vipgxh4scvv6vseo46gueb46"),
 },
 }, {
 Network: "devnet",
@@ -424,25 +424,25 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
 }, {
 Network: "devnet",
 Version: 12,
-BundleGitTag: "v11.0.0",
+BundleGitTag: "v12.0.0-rc.2",
-ManifestCid: MustParseCid("bafy2bzaceay35go4xbjb45km6o46e5bib3bi46panhovcbedrynzwmm3drr4i"),
+ManifestCid: MustParseCid("bafy2bzaceau5i7eanhvss22z5ixmyrihilfniqn22tvkecjj56akz4xj7fvku"),
 Actors: map[string]cid.Cid{
-"account": MustParseCid("bafk2bzacecf2pprkbdlpm4e2xz3ufunxtgrgyh2ie3stuqiyhibsvdze7kvri"),
+"account": MustParseCid("bafk2bzacedki4apynvdxxuoigmqkgaktgy2erjftoxqxqaklnelgveyaqknfu"),
-"cron": MustParseCid("bafk2bzaceasr5d2skowvzv5mzsyak6waqrgc46ewj6rzbapkfi5woom6n6bwa"),
+"cron": MustParseCid("bafk2bzacebjpczf7qtcisy3zdp3sqoohxe75tgupmdo5dr26vh7orzrsjn3b2"),
-"datacap": MustParseCid("bafk2bzaceaqd77gptubupda7rp7daxkxbkzwc253dxhiyoezxvj2tljmkgpny"),
+"datacap": MustParseCid("bafk2bzacecz4esatk7gizdc7yvl6soigkelhix7izbc75q6eqtb7gjzavpcqc"),
"eam": MustParseCid("bafk2bzacedve6p4ye6zxydjbfs4ode5r2equ7rqzpyltujsq2lu6wyxnijfx4"), "eam": MustParseCid("bafk2bzacebhtpd5mxfyovi7fgsfj62nhtmh4t5guob4sgq73ymgsk7473ltig"),
"ethaccount": MustParseCid("bafk2bzacea25xfsxwew3h2crer6jlb4c5vwu2gtch2jh73ocuxjhupenyrugy"), "ethaccount": MustParseCid("bafk2bzacebvdbbw5ag4qnxd7cif5mtakrw4wzv63diwl7awta5plaidfay4vg"),
"evm": MustParseCid("bafk2bzacece5hivtkmi757lyfahgti7xuqgofodb2u65pxgf6oizfwiiwlcsi"), "evm": MustParseCid("bafk2bzacebb7vrhprnshn52bzfmypjdpcrcfecapk232a6gapk3kghu2mp67q"),
"init": MustParseCid("bafk2bzacecxnr5y7qifzdqqiwfbjxv2yr7lbkcyu3e2mf5zjdncteupxdlquu"), "init": MustParseCid("bafk2bzaceaw4iouukgqxmwukfpt3sakdvsu75ftjvw47swnwtdftz5oszbt4w"),
"multisig": MustParseCid("bafk2bzaceayap4k4u3lbysaeeixct5fvhmafy3fa5eagvdpk3i4a7ubfdpobe"), "multisig": MustParseCid("bafk2bzaceahyjwf6re4mnuwhopglo3qzh6aboluboncpijm7vuiz3u4bkazho"),
"paymentchannel": MustParseCid("bafk2bzaceafgrz5wepbein35gie7rnsu7zttxvgllgdneuefmmy4j5izydtza"), "paymentchannel": MustParseCid("bafk2bzaceaupjw3djghaqw3g3hd4tw7uuas3njkszgzx2fhmgqh5eh4e6q2by"),
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
"reward": MustParseCid("bafk2bzacedwbtfqlx47fdkxjrb5mwiatheci44x3zkpx33smybc2cme23ymuo"), "reward": MustParseCid("bafk2bzacebzso6xkjxdscbpncw7el2d4hap6lfkgwqzrbc76lzp33vkwk6obc"),
"storagemarket": MustParseCid("bafk2bzaceaj74fmooaf3gj3ebwon64ky7hhdh7kytdr3agclqfrqzmpzykh7g"), "storagemarket": MustParseCid("bafk2bzacebzg74vyk3gzbhnz4zviwvxblyar574mtd6ayognmsvlkriejmunu"),
"storageminer": MustParseCid("bafk2bzacedb7bokkzzs7hnbhivp74pgcpermuy7j6b3ncodylksukkxtnn7ze"), "storageminer": MustParseCid("bafk2bzaceckqrzomdnfb35byrhabrmmapxplj66cv3efw7u62qswjaqsuxah4"),
"storagepower": MustParseCid("bafk2bzacedilnkegizkxz3nuutib4d4wwlk4bkla22loepia2h53yf4hysmq6"), "storagepower": MustParseCid("bafk2bzacebbtj2m2ajawfuzxqz5nmdep7xevjo2qfjqa5tx3vr5m6qojolya4"),
"system": MustParseCid("bafk2bzacedpyoncjbl4oxkjm5e77ngvpy2xfajjc4myfsv2vltvzxioattlu2"), "system": MustParseCid("bafk2bzacecnau5wddulbsvwn75tc3w75jrlvkybgrlxs4ngonqab6xq3eowvg"),
"verifiedregistry": MustParseCid("bafk2bzacebdqi5tr5pjnem5nylg2zbqcugvi7oxi35bhnrfudx4y4ufhlit2k"), "verifiedregistry": MustParseCid("bafk2bzacec37mddea65nvh4htsagtryfa3sq6i67utcupslyhzbhjhoy6hopa"),
}, },
}, { }, {
Network: "hyperspace", Network: "hyperspace",
@ -553,25 +553,25 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
}, { }, {
Network: "mainnet", Network: "mainnet",
Version: 12, Version: 12,
BundleGitTag: "v11.0.0", BundleGitTag: "v12.0.0-rc.2",
ManifestCid: MustParseCid("bafy2bzacecnhaiwcrpyjvzl4uv4q3jzoif26okl3m66q3cijp3dfwlcxwztwo"), ManifestCid: MustParseCid("bafy2bzacecooh5cjxkmraawn7wvmrx7k5hwdmbvqdteujr33mnaeclsazyb6a"),
Actors: map[string]cid.Cid{ Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzacealnlr7st6lkwoh6wxpf2hnrlex5sknaopgmkr2tuhg7vmbfy45so"), "account": MustParseCid("bafk2bzaceboftg75mdiba7xbo2i3uvgtca4brhnr3u5ptihonixgpnrvhpxoa"),
"cron": MustParseCid("bafk2bzacebpewdvvgt6tk2o2u4rcovdgym67tadiis5usemlbejg7k3kt567o"), "cron": MustParseCid("bafk2bzacechxjkfe2cehx4s7skj3wzfpzf7zolds64khrrrs66bhazsemktls"),
"datacap": MustParseCid("bafk2bzacebslykoyrb2hm7aacjngqgd5n2wmeii2goadrs5zaya3pvdf6pdnq"), "datacap": MustParseCid("bafk2bzacebpiwb2ml4qbnnaayxumtk43ryhc63exdgnhivy3hwgmzemawsmpq"),
"eam": MustParseCid("bafk2bzaceaelwt4yfsfvsu3pa3miwalsvy3cfkcjvmt4sqoeopsppnrmj2mf2"), "eam": MustParseCid("bafk2bzaceb3elj4hfbbjp7g5bptc7su7mptszl4nlqfedilxvstjo5ungm6oe"),
"ethaccount": MustParseCid("bafk2bzaceclkmc4yidxc6lgcjpfypbde2eddnevcveo4j5kmh4ek6inqysz2k"), "ethaccount": MustParseCid("bafk2bzaceb4gkau2vgsijcxpfuq33bd7w3efr2rrhxrwiacjmns2ntdiamswq"),
"evm": MustParseCid("bafk2bzacediwh6etwzwmb5pivtclpdplewdjzphouwqpppce6opisjv2fjqfe"), "evm": MustParseCid("bafk2bzacecmnyfiwb52tkbwmm2dsd7ysi3nvuxl3lmspy7pl26wxj4zj7w4wi"),
"init": MustParseCid("bafk2bzaceckwf3w6n2nw6eh77ktmsxqgsvshonvgnyk5q5syyngtetxvasfxg"), "init": MustParseCid("bafk2bzacebllyegx5r6lggf6ymyetbp7amacwpuxakhtjvjtvoy2bfkzk3vms"),
"multisig": MustParseCid("bafk2bzaceafajceqwg5ybiz7xw6rxammuirkgtuv625gzaehsqfprm4bazjmk"), "multisig": MustParseCid("bafk2bzacecw5lyp3n3t67xdwrmo36h4z7afc3lobmmr6wg55w6yjzg5jhmh42"),
"paymentchannel": MustParseCid("bafk2bzaceb4e6cnsnviegmqvsmoxzncruvhra54piq7bwiqfqevle6oob2gvo"), "paymentchannel": MustParseCid("bafk2bzacectv4cm47bnhga5febf3lo3fq47g72kmmp2xd5s6tcxz7hiqdywa4"),
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
"reward": MustParseCid("bafk2bzacebwjw2vxkobs7r2kwjdqqb42h2kucyuk6flbnyzw4odg5s4mogamo"), "reward": MustParseCid("bafk2bzacealqnxn5lwzwexd6reav4dppypquklx2ujlnvaxiqk2tzstyvkp5u"),
"storagemarket": MustParseCid("bafk2bzaceazu2j2zu4p24tr22btnqzkhzjvyjltlvsagaj6w3syevikeb5d7m"), "storagemarket": MustParseCid("bafk2bzacedylkg5am446lcuih4voyzdn4yjeqfsxfzh5b6mcuhx4mok5ph5c4"),
"storageminer": MustParseCid("bafk2bzacec24okjqrp7c7rj3hbrs5ez5apvwah2ruka6haesgfngf37mhk6us"), "storageminer": MustParseCid("bafk2bzacedio7wfaqutc4w6gl2dwqkgpcatz2r223ms74zxiovbjrfxmzeiou"),
"storagepower": MustParseCid("bafk2bzaceaxgloxuzg35vu7l7tohdgaq2frsfp4ejmuo7tkoxjp5zqrze6sf4"), "storagepower": MustParseCid("bafk2bzacecsij5tpfzjpfuckxvccv2p3bdqjklkrfyyoei6lx5dyj5j4fvjm6"),
"system": MustParseCid("bafk2bzaced7npe5mt5nh72jxr2igi2sofoa7gedt4w6kueeke7i3xxugqpjfm"), "system": MustParseCid("bafk2bzacebfqrja2hip7esf4eafxjmu6xcogoqu5xxtgdg7xa5szgvvdguchu"),
"verifiedregistry": MustParseCid("bafk2bzacedej3dnr62g2je2abmyjg3xqv4otvh6e26du5fcrhvw7zgcaaez3a"), "verifiedregistry": MustParseCid("bafk2bzacedudgflxc75c77c6zkmfyq4u2xuk7k6xw6dfdccarjrvxx453b77q"),
}, },
}, { }, {
Network: "testing", Network: "testing",
@ -659,25 +659,25 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
}, { }, {
Network: "testing", Network: "testing",
Version: 12, Version: 12,
BundleGitTag: "v11.0.0", BundleGitTag: "v12.0.0-rc.2",
ManifestCid: MustParseCid("bafy2bzacea2vxre32tg3xhpejrktiuzx4d3pcoe7yyazgscfibmegmchr6n42"), ManifestCid: MustParseCid("bafy2bzacec7artlmtdmfuhurd2i7dgturiveblruub5xh3yizjtp5qzye3dly"),
Actors: map[string]cid.Cid{ Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzaceccerssb3tgel6ukdghlwvs7dxsolj4fpkgn7dh7owzwapqb6ejpw"), "account": MustParseCid("bafk2bzacea74qqkfvacykmq5emzqblh4f4nmxdkiyixxpzs7kkcfnbfa7cb6m"),
"cron": MustParseCid("bafk2bzacebtfl6fczxnitrqqjhyefskf3asyn3gzuvqcddieoqfsaddk5fd4q"), "cron": MustParseCid("bafk2bzacecotbu7k6awdzfzakf7g5iaas6gswtunjnnb2xm2klqoshjgb4imy"),
"datacap": MustParseCid("bafk2bzacediikc55y6uzmnhucf4mik6rqpjulwvgp5gdibogxjhgbvusmzff2"), "datacap": MustParseCid("bafk2bzaceduhmqcyailiwdupt2ottfzh5hcrjoyeyiaipf3idk3mu7y3uz2mc"),
"eam": MustParseCid("bafk2bzaceazqi5ezossp6kvqogaaba6hxlfarqgniktmb7iy5qonha3eflz6m"), "eam": MustParseCid("bafk2bzaceb2yzzw6dcmcmhnt3mqnm4kah66f23pc4escnto3vwa552t6ctr7i"),
"ethaccount": MustParseCid("bafk2bzaceb77ospgfqqmf67v23wkyeg7lr2mu53ybaacu3bslx7s7nhttdueo"), "ethaccount": MustParseCid("bafk2bzacebwkvvbmttkcjjlicp4ineozc52i5sc6d46pcoq6lzzs2p5i2youa"),
"evm": MustParseCid("bafk2bzacedvgt7mv22hux4vrnklylq7qmw43kfrqwam6wdsfzkdnaewr33qbu"), "evm": MustParseCid("bafk2bzacedetwacs6wmoksxwjlbpp4442uav7fd3pagadejm2cph7ucym7eck"),
"init": MustParseCid("bafk2bzacealzb3nk2oypway5ubz3hs5py5ok5tuw545454vg4d3mwbslef4js"), "init": MustParseCid("bafk2bzacedhpoycn4sz7dragmbo5yqjspqriydxhplqdeguaqck2hmq5hgwqg"),
"multisig": MustParseCid("bafk2bzacec45ppn4hrwizmopp2v2atkxw35tb6yem6uqhqilrv7aiaknnnxmu"), "multisig": MustParseCid("bafk2bzaceacc3m23yvnpzoeekstqtr2acutfv4zvsgncorjdrsucymjohzxs4"),
"paymentchannel": MustParseCid("bafk2bzaceajbr3t6cngzh3katqteflbcrtwtdgbthnlfemon5tg6rytf2uonw"), "paymentchannel": MustParseCid("bafk2bzaceac6i76vfexefqf6qgebkhkf2cb4g664d5nmfh2dric5spgykevd2"),
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
"reward": MustParseCid("bafk2bzacea7ycf53kbq4robcuh3ziy7qwwhaqamc5krn3lugypgpxhlewdaiq"), "reward": MustParseCid("bafk2bzaceaajyncjxcrl7wbb6ukzkueyqz4uyekxpmtn4mpndkf7ksmggopzq"),
"storagemarket": MustParseCid("bafk2bzacedskmbcpaeb6bezs32szh52jrukvihobluadauayroo5gzrt32tkm"), "storagemarket": MustParseCid("bafk2bzaced6cexdcinwjhtfvuxgkxukiejp3goylaxtvhqfd24rs5z7g2z7dm"),
"storageminer": MustParseCid("bafk2bzaced3yg5lctmswnbkxyd6cleg3llyux7fu2vbddyd2ho36fpym423mq"), "storageminer": MustParseCid("bafk2bzaceclcbtu6edh73cn7ga7kzkmxcfnjt7jnxolkc5id6l6f7sztevm24"),
"storagepower": MustParseCid("bafk2bzacebvpdf372fzxgixztbz2r7ayxyvx7jmdxwlfuqt2cq7tnqgie3klw"), "storagepower": MustParseCid("bafk2bzacedexrf5qplrrl5xzijfrthjdqwodfs5e6zj5kpztc7qnywbqdyiii"),
"system": MustParseCid("bafk2bzaceaatvscbnkv36ixhtt2zel4er5oskxevgumh5gegqkv7uzah36f24"), "system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"),
"verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"), "verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"),
}, },
}, { }, {
Network: "testing-fake-proofs", Network: "testing-fake-proofs",
@ -765,24 +765,24 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
}, { }, {
Network: "testing-fake-proofs", Network: "testing-fake-proofs",
Version: 12, Version: 12,
BundleGitTag: "v11.0.0", BundleGitTag: "v12.0.0-rc.2",
ManifestCid: MustParseCid("bafy2bzacecojemqglhzzhjnhgtrcbsgkyv67ziytvtbhwlr4ym4oxqofv7zui"), ManifestCid: MustParseCid("bafy2bzaceand7owcb3omq7zjwir2q3lqapprl2eyiwpzv6oqs5bok4vzzqjbu"),
Actors: map[string]cid.Cid{ Actors: map[string]cid.Cid{
"account": MustParseCid("bafk2bzaceccerssb3tgel6ukdghlwvs7dxsolj4fpkgn7dh7owzwapqb6ejpw"), "account": MustParseCid("bafk2bzacea74qqkfvacykmq5emzqblh4f4nmxdkiyixxpzs7kkcfnbfa7cb6m"),
"cron": MustParseCid("bafk2bzacebtfl6fczxnitrqqjhyefskf3asyn3gzuvqcddieoqfsaddk5fd4q"), "cron": MustParseCid("bafk2bzacecotbu7k6awdzfzakf7g5iaas6gswtunjnnb2xm2klqoshjgb4imy"),
"datacap": MustParseCid("bafk2bzacediikc55y6uzmnhucf4mik6rqpjulwvgp5gdibogxjhgbvusmzff2"), "datacap": MustParseCid("bafk2bzaceduhmqcyailiwdupt2ottfzh5hcrjoyeyiaipf3idk3mu7y3uz2mc"),
"eam": MustParseCid("bafk2bzaceazqi5ezossp6kvqogaaba6hxlfarqgniktmb7iy5qonha3eflz6m"), "eam": MustParseCid("bafk2bzaceb2yzzw6dcmcmhnt3mqnm4kah66f23pc4escnto3vwa552t6ctr7i"),
"ethaccount": MustParseCid("bafk2bzaceb77ospgfqqmf67v23wkyeg7lr2mu53ybaacu3bslx7s7nhttdueo"), "ethaccount": MustParseCid("bafk2bzacebwkvvbmttkcjjlicp4ineozc52i5sc6d46pcoq6lzzs2p5i2youa"),
"evm": MustParseCid("bafk2bzacedvgt7mv22hux4vrnklylq7qmw43kfrqwam6wdsfzkdnaewr33qbu"), "evm": MustParseCid("bafk2bzacedetwacs6wmoksxwjlbpp4442uav7fd3pagadejm2cph7ucym7eck"),
"init": MustParseCid("bafk2bzacealzb3nk2oypway5ubz3hs5py5ok5tuw545454vg4d3mwbslef4js"), "init": MustParseCid("bafk2bzacedhpoycn4sz7dragmbo5yqjspqriydxhplqdeguaqck2hmq5hgwqg"),
"multisig": MustParseCid("bafk2bzacec45ppn4hrwizmopp2v2atkxw35tb6yem6uqhqilrv7aiaknnnxmu"), "multisig": MustParseCid("bafk2bzaceacc3m23yvnpzoeekstqtr2acutfv4zvsgncorjdrsucymjohzxs4"),
"paymentchannel": MustParseCid("bafk2bzaceajbr3t6cngzh3katqteflbcrtwtdgbthnlfemon5tg6rytf2uonw"), "paymentchannel": MustParseCid("bafk2bzaceac6i76vfexefqf6qgebkhkf2cb4g664d5nmfh2dric5spgykevd2"),
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
"reward": MustParseCid("bafk2bzacea7ycf53kbq4robcuh3ziy7qwwhaqamc5krn3lugypgpxhlewdaiq"), "reward": MustParseCid("bafk2bzaceaajyncjxcrl7wbb6ukzkueyqz4uyekxpmtn4mpndkf7ksmggopzq"),
"storagemarket": MustParseCid("bafk2bzacedskmbcpaeb6bezs32szh52jrukvihobluadauayroo5gzrt32tkm"), "storagemarket": MustParseCid("bafk2bzaced6cexdcinwjhtfvuxgkxukiejp3goylaxtvhqfd24rs5z7g2z7dm"),
"storageminer": MustParseCid("bafk2bzacebqeztpa5exztccqjwqhan5droiy7ga6zekm6f2gzxoe655vneczm"), "storageminer": MustParseCid("bafk2bzacebbusvtnyhi7mimbnx2tw2uyvrr3fbmncbf6frx6e4kvjvoqllu36"),
"storagepower": MustParseCid("bafk2bzaceb2tlyuwxncdxsh3hc4fwcjnpxaijkiv54ustwdjbrqabxdsc27km"), "storagepower": MustParseCid("bafk2bzacecdwijcbbryinjtm27pdinqqkyzoskri24pwsvsadwcq2alkkjpnc"),
"system": MustParseCid("bafk2bzaceaatvscbnkv36ixhtt2zel4er5oskxevgumh5gegqkv7uzah36f24"), "system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"),
"verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"), "verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"),
}, },
}} }}

Binary files not shown (5 changed binary files; file names not captured in this view).

View File

@ -67,6 +67,9 @@ var UpgradeThunderHeight = abi.ChainEpoch(-23)
var UpgradeWatermelonHeight = abi.ChainEpoch(200) var UpgradeWatermelonHeight = abi.ChainEpoch(200)
// This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFixHeight = -100
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet, 0: DrandMainnet,
} }

View File

@ -19,7 +19,7 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet, 0: DrandMainnet,
} }
const GenesisNetworkVersion = network.Version18 const GenesisNetworkVersion = network.Version20
var NetworkBundle = "butterflynet" var NetworkBundle = "butterflynet"
var BundleOverrides map[actorstypes.Version]string var BundleOverrides map[actorstypes.Version]string
@ -52,10 +52,13 @@ const UpgradeOhSnapHeight = -18
const UpgradeSkyrHeight = -19 const UpgradeSkyrHeight = -19
const UpgradeSharkHeight = -20 const UpgradeSharkHeight = -20
const UpgradeHyggeHeight = -21 const UpgradeHyggeHeight = -21
const UpgradeLightningHeight = -22
const UpgradeThunderHeight = -23
const UpgradeLightningHeight = 50 const UpgradeWatermelonHeight = 400
const UpgradeThunderHeight = UpgradeLightningHeight + 360 // This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFixHeight = -100
// ?????????? // ??????????
const UpgradeWatermelonHeight = 999999999999999 const UpgradeWatermelonHeight = 999999999999999

View File

@ -79,8 +79,11 @@ const UpgradeLightningHeight = 489094
// 2023-04-21T16:00:00Z // 2023-04-21T16:00:00Z
const UpgradeThunderHeight = UpgradeLightningHeight + 3120 const UpgradeThunderHeight = UpgradeLightningHeight + 3120
// ?????????? // 2023-10-19T13:00:00Z
const UpgradeWatermelonHeight = 999999999999999 const UpgradeWatermelonHeight = 1013134
// 2023-11-07T13:00:00Z
const UpgradeWatermelonFixHeight = 1070494
var SupportedProofTypes = []abi.RegisteredSealProof{ var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg32GiBV1, abi.RegisteredSealProof_StackedDrg32GiBV1,

View File

@ -56,6 +56,9 @@ var UpgradeThunderHeight = abi.ChainEpoch(-23)
const UpgradeWatermelonHeight = 50 const UpgradeWatermelonHeight = 50
// This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFixHeight = -1
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet, 0: DrandMainnet,
} }

View File

@ -95,8 +95,11 @@ const UpgradeLightningHeight = 2809800
// 2023-05-18T13:00:00Z // 2023-05-18T13:00:00Z
const UpgradeThunderHeight = UpgradeLightningHeight + 2880*21 const UpgradeThunderHeight = UpgradeLightningHeight + 2880*21
// ??????? // 2023-11-29T13:30:00Z
var UpgradeWatermelonHeight = abi.ChainEpoch(9999999999) var UpgradeWatermelonHeight = abi.ChainEpoch(3431940)
// This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFixHeight = -1
var SupportedProofTypes = []abi.RegisteredSealProof{ var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg32GiBV1, abi.RegisteredSealProof_StackedDrg32GiBV1,
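
A quick way to sanity-check the epoch/date comments introduced above is to convert the upgrade height back to wall-clock time. The sketch below is illustrative only and relies on two values that are not part of this diff: mainnet genesis at 2020-08-24T22:00:00Z and a 30-second epoch duration.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumptions (not taken from this diff): mainnet genesis timestamp and
	// a fixed 30-second epoch.
	genesis := time.Date(2020, 8, 24, 22, 0, 0, 0, time.UTC)
	const epochSeconds = 30

	watermelon := int64(3431940) // UpgradeWatermelonHeight on mainnet, per the hunk above
	ts := genesis.Add(time.Duration(watermelon*epochSeconds) * time.Second)
	fmt.Println(ts.UTC()) // 2023-11-29 13:30:00 +0000 UTC, matching the comment above
}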

View File

@ -87,29 +87,30 @@ var (
UpgradeBreezeHeight abi.ChainEpoch = -1 UpgradeBreezeHeight abi.ChainEpoch = -1
BreezeGasTampingDuration abi.ChainEpoch = 0 BreezeGasTampingDuration abi.ChainEpoch = 0
UpgradeSmokeHeight abi.ChainEpoch = -1 UpgradeSmokeHeight abi.ChainEpoch = -1
UpgradeIgnitionHeight abi.ChainEpoch = -2 UpgradeIgnitionHeight abi.ChainEpoch = -2
UpgradeRefuelHeight abi.ChainEpoch = -3 UpgradeRefuelHeight abi.ChainEpoch = -3
UpgradeTapeHeight abi.ChainEpoch = -4 UpgradeTapeHeight abi.ChainEpoch = -4
UpgradeAssemblyHeight abi.ChainEpoch = 10 UpgradeAssemblyHeight abi.ChainEpoch = 10
UpgradeLiftoffHeight abi.ChainEpoch = -5 UpgradeLiftoffHeight abi.ChainEpoch = -5
UpgradeKumquatHeight abi.ChainEpoch = -6 UpgradeKumquatHeight abi.ChainEpoch = -6
UpgradeCalicoHeight abi.ChainEpoch = -8 UpgradeCalicoHeight abi.ChainEpoch = -8
UpgradePersianHeight abi.ChainEpoch = -9 UpgradePersianHeight abi.ChainEpoch = -9
UpgradeOrangeHeight abi.ChainEpoch = -10 UpgradeOrangeHeight abi.ChainEpoch = -10
UpgradeClausHeight abi.ChainEpoch = -11 UpgradeClausHeight abi.ChainEpoch = -11
UpgradeTrustHeight abi.ChainEpoch = -12 UpgradeTrustHeight abi.ChainEpoch = -12
UpgradeNorwegianHeight abi.ChainEpoch = -13 UpgradeNorwegianHeight abi.ChainEpoch = -13
UpgradeTurboHeight abi.ChainEpoch = -14 UpgradeTurboHeight abi.ChainEpoch = -14
UpgradeHyperdriveHeight abi.ChainEpoch = -15 UpgradeHyperdriveHeight abi.ChainEpoch = -15
UpgradeChocolateHeight abi.ChainEpoch = -16 UpgradeChocolateHeight abi.ChainEpoch = -16
UpgradeOhSnapHeight abi.ChainEpoch = -17 UpgradeOhSnapHeight abi.ChainEpoch = -17
UpgradeSkyrHeight abi.ChainEpoch = -18 UpgradeSkyrHeight abi.ChainEpoch = -18
UpgradeSharkHeight abi.ChainEpoch = -19 UpgradeSharkHeight abi.ChainEpoch = -19
UpgradeHyggeHeight abi.ChainEpoch = -20 UpgradeHyggeHeight abi.ChainEpoch = -20
UpgradeLightningHeight abi.ChainEpoch = -21 UpgradeLightningHeight abi.ChainEpoch = -21
UpgradeThunderHeight abi.ChainEpoch = -22 UpgradeThunderHeight abi.ChainEpoch = -22
UpgradeWatermelonHeight abi.ChainEpoch = -23 UpgradeWatermelonHeight abi.ChainEpoch = -23
UpgradeWatermelonFixHeight abi.ChainEpoch = -24
DrandSchedule = map[abi.ChainEpoch]DrandEnum{ DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet, 0: DrandMainnet,

View File

@ -23,6 +23,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
) )
var InitActorAddr = builtin.InitActorAddr
var SystemActorAddr = builtin.SystemActorAddr var SystemActorAddr = builtin.SystemActorAddr
var BurntFundsActorAddr = builtin.BurntFundsActorAddr var BurntFundsActorAddr = builtin.BurntFundsActorAddr
var CronActorAddr = builtin.CronActorAddr var CronActorAddr = builtin.CronActorAddr

View File

@ -23,6 +23,7 @@ import (
smoothingtypes "github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing" smoothingtypes "github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing"
) )
var InitActorAddr = builtin.InitActorAddr
var SystemActorAddr = builtin.SystemActorAddr var SystemActorAddr = builtin.SystemActorAddr
var BurntFundsActorAddr = builtin.BurntFundsActorAddr var BurntFundsActorAddr = builtin.BurntFundsActorAddr
var CronActorAddr = builtin.CronActorAddr var CronActorAddr = builtin.CronActorAddr

View File

@ -19,7 +19,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
{{if (le .v 7)}} {{if (le .v 7)}}

View File

@ -11,13 +11,13 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )
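
This import swap, repeated for each market actor version below, keeps the verifregtypes identifier but points it at the lotus verifreg wrapper instead of go-state-types/builtin/v9/verifreg. A minimal sketch of what calling code sees after the swap, assuming the wrapper re-exports the allocation/claim aliases this code path relies on (AllocationId is used here purely as an illustration):

package main

import (
	"fmt"

	verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
)

func main() {
	// Same identifier, new home: verifregtypes.* now resolves through the
	// lotus verifreg wrapper rather than go-state-types/builtin/v9/verifreg.
	var id verifregtypes.AllocationId = 1
	fmt.Println(id)
}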

View File

@ -17,11 +17,11 @@ import (
market10 "github.com/filecoin-project/go-state-types/builtin/v10/market" market10 "github.com/filecoin-project/go-state-types/builtin/v10/market"
adt10 "github.com/filecoin-project/go-state-types/builtin/v10/util/adt" adt10 "github.com/filecoin-project/go-state-types/builtin/v10/util/adt"
markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )

View File

@ -17,11 +17,11 @@ import (
market11 "github.com/filecoin-project/go-state-types/builtin/v11/market" market11 "github.com/filecoin-project/go-state-types/builtin/v11/market"
adt11 "github.com/filecoin-project/go-state-types/builtin/v11/util/adt" adt11 "github.com/filecoin-project/go-state-types/builtin/v11/util/adt"
markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )

View File

@ -17,11 +17,11 @@ import (
market12 "github.com/filecoin-project/go-state-types/builtin/v12/market" market12 "github.com/filecoin-project/go-state-types/builtin/v12/market"
adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt" adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt"
markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )

View File

@ -11,13 +11,13 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )

View File

@ -11,13 +11,13 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
market3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/market" market3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/market"
adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )

View File

@ -11,13 +11,13 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market" market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market"
adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )

View File

@ -11,13 +11,13 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market" market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market"
adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )

View File

@ -13,13 +13,13 @@ import (
rlepluslazy "github.com/filecoin-project/go-bitfield/rle" rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
market6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/market" market6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/market"
adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt" adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )

View File

@ -13,13 +13,13 @@ import (
rlepluslazy "github.com/filecoin-project/go-bitfield/rle" rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market" market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market"
adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )

View File

@ -16,11 +16,11 @@ import (
market8 "github.com/filecoin-project/go-state-types/builtin/v8/market" market8 "github.com/filecoin-project/go-state-types/builtin/v8/market"
adt8 "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" adt8 "github.com/filecoin-project/go-state-types/builtin/v8/util/adt"
markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )

View File

@ -17,11 +17,11 @@ import (
market9 "github.com/filecoin-project/go-state-types/builtin/v9/market" market9 "github.com/filecoin-project/go-state-types/builtin/v9/market"
markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"
adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt" adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )

View File

@ -154,7 +154,7 @@ type Partition interface {
type SectorOnChainInfo = minertypes.SectorOnChainInfo type SectorOnChainInfo = minertypes.SectorOnChainInfo
func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof, configWantSynthetic bool) (abi.RegisteredSealProof, error) {
// We added support for the new proofs in network version 7, and removed support for the old // We added support for the new proofs in network version 7, and removed support for the old
// ones in network version 8. // ones in network version 8.
if nver < network.Version7 { if nver < network.Version7 {
@ -174,17 +174,34 @@ func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.Re
} }
} }
if nver < MinSyntheticPoRepVersion || !configWantSynthetic {
switch proof {
case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1:
return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil
case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, abi.RegisteredPoStProof_StackedDrgWindow8MiBV1_1:
return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil
case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, abi.RegisteredPoStProof_StackedDrgWindow512MiBV1_1:
return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil
case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, abi.RegisteredPoStProof_StackedDrgWindow32GiBV1_1:
return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil
case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1_1:
return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil
default:
return -1, xerrors.Errorf("unrecognized window post type: %d", proof)
}
}
switch proof { switch proof {
case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1: case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1:
return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil return abi.RegisteredSealProof_StackedDrg2KiBV1_1_Feat_SyntheticPoRep, nil
case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, abi.RegisteredPoStProof_StackedDrgWindow8MiBV1_1: case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, abi.RegisteredPoStProof_StackedDrgWindow8MiBV1_1:
return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil return abi.RegisteredSealProof_StackedDrg8MiBV1_1_Feat_SyntheticPoRep, nil
case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, abi.RegisteredPoStProof_StackedDrgWindow512MiBV1_1: case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, abi.RegisteredPoStProof_StackedDrgWindow512MiBV1_1:
return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil return abi.RegisteredSealProof_StackedDrg512MiBV1_1_Feat_SyntheticPoRep, nil
case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, abi.RegisteredPoStProof_StackedDrgWindow32GiBV1_1: case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, abi.RegisteredPoStProof_StackedDrgWindow32GiBV1_1:
return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil return abi.RegisteredSealProof_StackedDrg32GiBV1_1_Feat_SyntheticPoRep, nil
case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1_1: case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1_1:
return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil return abi.RegisteredSealProof_StackedDrg64GiBV1_1_Feat_SyntheticPoRep, nil
default: default:
return -1, xerrors.Errorf("unrecognized window post type: %d", proof) return -1, xerrors.Errorf("unrecognized window post type: %d", proof)
} }
@ -213,8 +230,37 @@ type PendingBeneficiaryChange = minertypes.PendingBeneficiaryChange
type WorkerKeyChange = minertypes.WorkerKeyChange type WorkerKeyChange = minertypes.WorkerKeyChange
type SectorPreCommitOnChainInfo = minertypes.SectorPreCommitOnChainInfo type SectorPreCommitOnChainInfo = minertypes.SectorPreCommitOnChainInfo
type SectorPreCommitInfo = minertypes.SectorPreCommitInfo type SectorPreCommitInfo = minertypes.SectorPreCommitInfo
type SubmitWindowedPoStParams = minertypes.SubmitWindowedPoStParams
type PoStPartition = minertypes.PoStPartition
type RecoveryDeclaration = minertypes.RecoveryDeclaration
type FaultDeclaration = minertypes.FaultDeclaration
type DeclareFaultsRecoveredParams = minertypes.DeclareFaultsRecoveredParams
type DeclareFaultsParams = minertypes.DeclareFaultsParams
type ProveCommitAggregateParams = minertypes.ProveCommitAggregateParams
type ProveCommitSectorParams = minertypes.ProveCommitSectorParams
type ProveReplicaUpdatesParams = minertypes.ProveReplicaUpdatesParams
type ReplicaUpdate = minertypes.ReplicaUpdate
type PreCommitSectorBatchParams = minertypes.PreCommitSectorBatchParams
type PreCommitSectorBatchParams2 = minertypes.PreCommitSectorBatchParams2
type ExtendSectorExpiration2Params = minertypes.ExtendSectorExpiration2Params
type SectorClaim = minertypes.SectorClaim
type ExpirationExtension2 = minertypes.ExpirationExtension2
type CompactPartitionsParams = minertypes.CompactPartitionsParams
type WithdrawBalanceParams = minertypes.WithdrawBalanceParams
var QAPowerMax = minertypes.QAPowerMax
type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo
var WPoStProvingPeriod = func() abi.ChainEpoch { return minertypes.WPoStProvingPeriod }
var WPoStChallengeWindow = func() abi.ChainEpoch { return minertypes.WPoStChallengeWindow }
const WPoStPeriodDeadlines = minertypes.WPoStPeriodDeadlines
const WPoStChallengeLookback = minertypes.WPoStChallengeLookback
const FaultDeclarationCutoff = minertypes.FaultDeclarationCutoff
const MinAggregatedSectors = minertypes.MinAggregatedSectors
const MinSectorExpiration = minertypes.MinSectorExpiration
type SectorExpiration struct { type SectorExpiration struct {
OnTime abi.ChainEpoch OnTime abi.ChainEpoch

View File

@ -214,7 +214,7 @@ type Partition interface {
type SectorOnChainInfo = minertypes.SectorOnChainInfo type SectorOnChainInfo = minertypes.SectorOnChainInfo
func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof, configWantSynthetic bool) (abi.RegisteredSealProof, error) {
// We added support for the new proofs in network version 7, and removed support for the old // We added support for the new proofs in network version 7, and removed support for the old
// ones in network version 8. // ones in network version 8.
if nver < network.Version7 { if nver < network.Version7 {
@ -234,17 +234,34 @@ func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.Re
} }
} }
if nver < MinSyntheticPoRepVersion || !configWantSynthetic {
switch proof {
case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1:
return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil
case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, abi.RegisteredPoStProof_StackedDrgWindow8MiBV1_1:
return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil
case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, abi.RegisteredPoStProof_StackedDrgWindow512MiBV1_1:
return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil
case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, abi.RegisteredPoStProof_StackedDrgWindow32GiBV1_1:
return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil
case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1_1:
return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil
default:
return -1, xerrors.Errorf("unrecognized window post type: %d", proof)
}
}
switch proof { switch proof {
case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1: case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1:
return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil return abi.RegisteredSealProof_StackedDrg2KiBV1_1_Feat_SyntheticPoRep, nil
case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, abi.RegisteredPoStProof_StackedDrgWindow8MiBV1_1: case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, abi.RegisteredPoStProof_StackedDrgWindow8MiBV1_1:
return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil return abi.RegisteredSealProof_StackedDrg8MiBV1_1_Feat_SyntheticPoRep, nil
case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, abi.RegisteredPoStProof_StackedDrgWindow512MiBV1_1: case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, abi.RegisteredPoStProof_StackedDrgWindow512MiBV1_1:
return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil return abi.RegisteredSealProof_StackedDrg512MiBV1_1_Feat_SyntheticPoRep, nil
case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, abi.RegisteredPoStProof_StackedDrgWindow32GiBV1_1: case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, abi.RegisteredPoStProof_StackedDrgWindow32GiBV1_1:
return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil return abi.RegisteredSealProof_StackedDrg32GiBV1_1_Feat_SyntheticPoRep, nil
case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1_1: case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1_1:
return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil return abi.RegisteredSealProof_StackedDrg64GiBV1_1_Feat_SyntheticPoRep, nil
default: default:
return -1, xerrors.Errorf("unrecognized window post type: %d", proof) return -1, xerrors.Errorf("unrecognized window post type: %d", proof)
} }
@ -273,8 +290,37 @@ type PendingBeneficiaryChange = minertypes.PendingBeneficiaryChange
type WorkerKeyChange = minertypes.WorkerKeyChange type WorkerKeyChange = minertypes.WorkerKeyChange
type SectorPreCommitOnChainInfo = minertypes.SectorPreCommitOnChainInfo type SectorPreCommitOnChainInfo = minertypes.SectorPreCommitOnChainInfo
type SectorPreCommitInfo = minertypes.SectorPreCommitInfo type SectorPreCommitInfo = minertypes.SectorPreCommitInfo
type SubmitWindowedPoStParams = minertypes.SubmitWindowedPoStParams
type PoStPartition = minertypes.PoStPartition
type RecoveryDeclaration = minertypes.RecoveryDeclaration
type FaultDeclaration = minertypes.FaultDeclaration
type DeclareFaultsRecoveredParams = minertypes.DeclareFaultsRecoveredParams
type DeclareFaultsParams = minertypes.DeclareFaultsParams
type ProveCommitAggregateParams = minertypes.ProveCommitAggregateParams
type ProveCommitSectorParams = minertypes.ProveCommitSectorParams
type ProveReplicaUpdatesParams = minertypes.ProveReplicaUpdatesParams
type ReplicaUpdate = minertypes.ReplicaUpdate
type PreCommitSectorBatchParams = minertypes.PreCommitSectorBatchParams
type PreCommitSectorBatchParams2 = minertypes.PreCommitSectorBatchParams2
type ExtendSectorExpiration2Params = minertypes.ExtendSectorExpiration2Params
type SectorClaim = minertypes.SectorClaim
type ExpirationExtension2 = minertypes.ExpirationExtension2
type CompactPartitionsParams = minertypes.CompactPartitionsParams
type WithdrawBalanceParams = minertypes.WithdrawBalanceParams
var QAPowerMax = minertypes.QAPowerMax
type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo
var WPoStProvingPeriod = func() abi.ChainEpoch { return minertypes.WPoStProvingPeriod }
var WPoStChallengeWindow = func() abi.ChainEpoch { return minertypes.WPoStChallengeWindow }
const WPoStPeriodDeadlines = minertypes.WPoStPeriodDeadlines
const WPoStChallengeLookback = minertypes.WPoStChallengeLookback
const FaultDeclarationCutoff = minertypes.FaultDeclarationCutoff
const MinAggregatedSectors = minertypes.MinAggregatedSectors
const MinSectorExpiration = minertypes.MinSectorExpiration
type SectorExpiration struct { type SectorExpiration struct {
OnTime abi.ChainEpoch OnTime abi.ChainEpoch
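
The block of type aliases and constants added in this hunk (and in the matching hunk above) re-exports the builtin-actors miner types through the lotus miner package, so callers no longer need a direct go-state-types miner import for WindowPoSt parameters and proving-period constants. A small illustrative sketch, not part of the diff:

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

func main() {
	// WindowPoSt params and constants are now reachable from the lotus miner
	// package directly via the aliases above.
	var params miner.SubmitWindowedPoStParams
	params.Deadline = 3
	params.Partitions = []miner.PoStPartition{{Index: 0}}

	fmt.Println(params.Deadline, len(params.Partitions))
	fmt.Println(miner.WPoStProvingPeriod(), miner.WPoStPeriodDeadlines)
}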

View File

@ -72,7 +72,7 @@ func (s *state{{.v}}) AvailableBalance(bal abi.TokenAmount) (available abi.Token
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesnt have enough funds to cover their locked pledge // this panics if the miner doesn't have enough funds to cover their locked pledge
available{{if (ge .v 2)}}, err{{end}} = s.GetAvailableBalance(bal) available{{if (ge .v 2)}}, err{{end}} = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -8,6 +8,8 @@ import (
"github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/go-state-types/network"
) )
var MinSyntheticPoRepVersion = network.Version21
func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) (bitfield.BitField, error) { func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) (bitfield.BitField, error) {
var parts []bitfield.BitField var parts []bitfield.BitField
@ -31,7 +33,7 @@ func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error))
// SealProofTypeFromSectorSize returns preferred seal proof type for creating // SealProofTypeFromSectorSize returns preferred seal proof type for creating
// new miner actors and new sectors // new miner actors and new sectors
func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.RegisteredSealProof, error) { func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version, synthetic bool) (abi.RegisteredSealProof, error) {
switch { switch {
case nv < network.Version7: case nv < network.Version7:
switch ssize { switch ssize {
@ -49,25 +51,49 @@ func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.
return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
} }
case nv >= network.Version7: case nv >= network.Version7:
var v abi.RegisteredSealProof
switch ssize { switch ssize {
case 2 << 10: case 2 << 10:
return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil v = abi.RegisteredSealProof_StackedDrg2KiBV1_1
case 8 << 20: case 8 << 20:
return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil v = abi.RegisteredSealProof_StackedDrg8MiBV1_1
case 512 << 20: case 512 << 20:
return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil v = abi.RegisteredSealProof_StackedDrg512MiBV1_1
case 32 << 30: case 32 << 30:
return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil v = abi.RegisteredSealProof_StackedDrg32GiBV1_1
case 64 << 30: case 64 << 30:
return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil v = abi.RegisteredSealProof_StackedDrg64GiBV1_1
default: default:
return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize)
} }
if nv >= MinSyntheticPoRepVersion && synthetic {
return toSynthetic(v)
} else {
return v, nil
}
} }
return 0, xerrors.Errorf("unsupported network version") return 0, xerrors.Errorf("unsupported network version")
} }
func toSynthetic(in abi.RegisteredSealProof) (abi.RegisteredSealProof, error) {
switch in {
case abi.RegisteredSealProof_StackedDrg2KiBV1_1:
return abi.RegisteredSealProof_StackedDrg2KiBV1_1_Feat_SyntheticPoRep, nil
case abi.RegisteredSealProof_StackedDrg8MiBV1_1:
return abi.RegisteredSealProof_StackedDrg8MiBV1_1_Feat_SyntheticPoRep, nil
case abi.RegisteredSealProof_StackedDrg512MiBV1_1:
return abi.RegisteredSealProof_StackedDrg512MiBV1_1_Feat_SyntheticPoRep, nil
case abi.RegisteredSealProof_StackedDrg32GiBV1_1:
return abi.RegisteredSealProof_StackedDrg32GiBV1_1_Feat_SyntheticPoRep, nil
case abi.RegisteredSealProof_StackedDrg64GiBV1_1:
return abi.RegisteredSealProof_StackedDrg64GiBV1_1_Feat_SyntheticPoRep, nil
default:
return 0, xerrors.Errorf("unsupported conversion to synthetic: %v", in)
}
}
// WindowPoStProofTypeFromSectorSize returns preferred post proof type for creating // WindowPoStProofTypeFromSectorSize returns preferred post proof type for creating
// new miner actors and new sectors // new miner actors and new sectors
func WindowPoStProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.RegisteredPoStProof, error) { func WindowPoStProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.RegisteredPoStProof, error) {
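
SealProofTypeFromSectorSize now takes a synthetic flag and routes through the new toSynthetic helper when both the network version and the flag allow it. A brief usage sketch (illustrative only):

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

func main() {
	ssize := abi.SectorSize(32 << 30)

	// synthetic=false keeps the existing V1_1 seal proof type.
	sp, err := miner.SealProofTypeFromSectorSize(ssize, network.Version21, false)
	fmt.Println(sp == abi.RegisteredSealProof_StackedDrg32GiBV1_1, err) // true <nil>

	// synthetic=true at nv21+ goes through toSynthetic and returns the
	// _Feat_SyntheticPoRep variant.
	sp, err = miner.SealProofTypeFromSectorSize(ssize, network.Version21, true)
	fmt.Println(sp == abi.RegisteredSealProof_StackedDrg32GiBV1_1_Feat_SyntheticPoRep, err) // true <nil>
}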

View File

@ -62,7 +62,7 @@ func (s *state0) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesnt have enough funds to cover their locked pledge // this panics if the miner doesn't have enough funds to cover their locked pledge
available = s.GetAvailableBalance(bal) available = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state10) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmou
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesnt have enough funds to cover their locked pledge // this panics if the miner doesn't have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state11) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmou
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesnt have enough funds to cover their locked pledge // this panics if the miner doesn't have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state12) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmou
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesnt have enough funds to cover their locked pledge // this panics if the miner doesn't have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -61,7 +61,7 @@ func (s *state2) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesnt have enough funds to cover their locked pledge // this panics if the miner doesn't have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state3) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesnt have enough funds to cover their locked pledge // this panics if the miner doesn't have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state4) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesnt have enough funds to cover their locked pledge // this panics if the miner doesn't have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state5) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesnt have enough funds to cover their locked pledge // this panics if the miner doesn't have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state6) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesnt have enough funds to cover their locked pledge // this panics if the miner doesn't have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state7) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesnt have enough funds to cover their locked pledge // this panics if the miner doesn't have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state8) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesnt have enough funds to cover their locked pledge // this panics if the miner doesn't have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -62,7 +62,7 @@ func (s *state9) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun
available = abi.NewTokenAmount(0) available = abi.NewTokenAmount(0)
} }
}() }()
// this panics if the miner doesnt have enough funds to cover their locked pledge // this panics if the miner doesn't have enough funds to cover their locked pledge
available, err = s.GetAvailableBalance(bal) available, err = s.GetAvailableBalance(bal)
return available, err return available, err
} }

View File

@ -29,7 +29,7 @@ import (
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )
// Load returns an abstract copy of payment channel state, irregardless of actor version // Load returns an abstract copy of payment channel state, regardless of actor version
func Load(store adt.Store, act *types.Actor) (State, error) { func Load(store adt.Store, act *types.Actor) (State, error) {
if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { if name, av, ok := actors.GetActorMetaByCode(act.Code); ok {
if name != manifest.PaychKey { if name != manifest.PaychKey {

View File

@ -29,7 +29,7 @@ import (
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )
// Load returns an abstract copy of payment channel state, irregardless of actor version // Load returns an abstract copy of payment channel state, regardless of actor version
func Load(store adt.Store, act *types.Actor) (State, error) { func Load(store adt.Store, act *types.Actor) (State, error) {
if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { if name, av, ok := actors.GetActorMetaByCode(act.Code); ok {
if name != manifest.PaychKey { if name != manifest.PaychKey {

View File

@ -97,4 +97,6 @@ func AllCodes() []cid.Cid {
type Allocation = verifregtypes.Allocation type Allocation = verifregtypes.Allocation
type AllocationId = verifregtypes.AllocationId type AllocationId = verifregtypes.AllocationId
type Claim = verifregtypes.Claim type Claim = verifregtypes.Claim
type ClaimId = verifregtypes.ClaimId type ClaimId = verifregtypes.ClaimId
const NoAllocationID = verifregtypes.NoAllocationID

View File

@ -168,3 +168,5 @@ type Allocation = verifregtypes.Allocation
type AllocationId = verifregtypes.AllocationId type AllocationId = verifregtypes.AllocationId
type Claim = verifregtypes.Claim type Claim = verifregtypes.Claim
type ClaimId = verifregtypes.ClaimId type ClaimId = verifregtypes.ClaimId
const NoAllocationID = verifregtypes.NoAllocationID

View File

@ -51,6 +51,12 @@ func RegisterManifest(av actorstypes.Version, manifestCid cid.Cid, entries map[s
} }
} }
func AddActorMeta(name string, codeId cid.Cid, av actorstypes.Version) {
manifestMx.Lock()
defer manifestMx.Unlock()
actorMeta[codeId] = actorEntry{name: name, version: av}
}
// GetManifest gets a loaded manifest. // GetManifest gets a loaded manifest.
func GetManifest(av actorstypes.Version) (cid.Cid, bool) { func GetManifest(av actorstypes.Version) (cid.Cid, bool) {
manifestMx.RLock() manifestMx.RLock()
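Note on the new AddActorMeta hook above: it lets code outside of a full RegisterManifest call teach the registry about a single actor code CID, so GetActorMetaByCode can resolve it later. A minimal, hypothetical usage sketch (the wrapper function is invented; the CID is the buggy calibrationnet miner code that appears later in this commit, used here purely as an example):

import (
	"github.com/ipfs/go-cid"

	actorstypes "github.com/filecoin-project/go-state-types/actors"

	"github.com/filecoin-project/lotus/chain/actors"
)

// registerBuggyCalibnetMiner is an illustrative helper, not part of this commit.
func registerBuggyCalibnetMiner() {
	buggyMiner := cid.MustParse("bafk2bzacecnh2ouohmonvebq7uughh4h3ppmg4cjsk74dzxlbbtlcij4xbzxq")
	// After this call, actors.GetActorMetaByCode(buggyMiner) resolves to
	// ("storageminer", Version12) even though the CID is not in the loaded manifest.
	actors.AddActorMeta("storageminer", buggyMiner, actorstypes.Version12)
}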

View File

@ -13,7 +13,7 @@ import (
func SerializeParams(i cbg.CBORMarshaler) ([]byte, aerrors.ActorError) { func SerializeParams(i cbg.CBORMarshaler) ([]byte, aerrors.ActorError) {
buf := new(bytes.Buffer) buf := new(bytes.Buffer)
if err := i.MarshalCBOR(buf); err != nil { if err := i.MarshalCBOR(buf); err != nil {
// TODO: shouldnt this be a fatal error? // TODO: shouldn't this be a fatal error?
return nil, aerrors.Absorb(err, exitcode.ErrSerialization, "failed to encode parameter") return nil, aerrors.Absorb(err, exitcode.ErrSerialization, "failed to encode parameter")
} }
return buf.Bytes(), nil return buf.Bytes(), nil

View File

@ -560,8 +560,53 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
return ChainFinality return ChainFinality
} }
func GetMaxSectorExpirationExtension() abi.ChainEpoch { func GetMaxSectorExpirationExtension(nv network.Version) (abi.ChainEpoch, error) {
return miner12.MaxSectorExpirationExtension v, err := actorstypes.VersionForNetwork(nv)
if err != nil {
return 0, xerrors.Errorf("failed to get actors version: %w", err)
}
switch v {
case actorstypes.Version0:
return miner0.MaxSectorExpirationExtension, nil
case actorstypes.Version2:
return miner2.MaxSectorExpirationExtension, nil
case actorstypes.Version3:
return miner3.MaxSectorExpirationExtension, nil
case actorstypes.Version4:
return miner4.MaxSectorExpirationExtension, nil
case actorstypes.Version5:
return miner5.MaxSectorExpirationExtension, nil
case actorstypes.Version6:
return miner6.MaxSectorExpirationExtension, nil
case actorstypes.Version7:
return miner7.MaxSectorExpirationExtension, nil
case actorstypes.Version8:
return miner8.MaxSectorExpirationExtension, nil
case actorstypes.Version9:
return miner9.MaxSectorExpirationExtension, nil
case actorstypes.Version10:
return miner10.MaxSectorExpirationExtension, nil
case actorstypes.Version11:
return miner11.MaxSectorExpirationExtension, nil
case actorstypes.Version12:
return miner12.MaxSectorExpirationExtension, nil
default:
return 0, xerrors.Errorf("unsupported network version")
}
} }
func GetMinSectorExpiration() abi.ChainEpoch { func GetMinSectorExpiration() abi.ChainEpoch {
@ -577,7 +622,8 @@ func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, e
if err != nil { if err != nil {
return 0, err return 0, err
} }
return int(uint64(maxSectors) / sectorsPerPart), nil
return min(miner12.PoStedPartitionsMax, int(uint64(maxSectors)/sectorsPerPart)), nil
} }
func GetDefaultAggregationProof() abi.RegisteredAggregationProof { func GetDefaultAggregationProof() abi.RegisteredAggregationProof {
@ -838,3 +884,10 @@ func GetSealProofFromPoStProof(postProof abi.RegisteredPoStProof) (abi.Registere
} }
return sealProof, nil return sealProof, nil
} }
func min(a, b int) int {
if a < b {
return a
}
return b
}
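Callers of GetMaxSectorExpirationExtension now have to pass the network version and handle an error, and GetMaxPoStPartitions is additionally capped at the actors' PoStedPartitionsMax. A minimal caller-side sketch of the adapted pattern (the helper name is invented for illustration):

import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/actors/policy"
)

// maxExpirationFor is a hypothetical helper showing the new call pattern.
func maxExpirationFor(nv network.Version, activation abi.ChainEpoch) (abi.ChainEpoch, error) {
	maxExtension, err := policy.GetMaxSectorExpirationExtension(nv)
	if err != nil {
		return 0, xerrors.Errorf("getting max sector expiration extension: %w", err)
	}
	return activation + maxExtension, nil
}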

View File

@ -223,8 +223,20 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
return ChainFinality return ChainFinality
} }
func GetMaxSectorExpirationExtension() abi.ChainEpoch { func GetMaxSectorExpirationExtension(nv network.Version) (abi.ChainEpoch, error) {
return miner{{.latestVersion}}.MaxSectorExpirationExtension v, err := actorstypes.VersionForNetwork(nv)
if err != nil {
return 0, xerrors.Errorf("failed to get actors version: %w", err)
}
switch v {
{{range .versions}}
case actorstypes.Version{{.}}:
return miner{{.}}.MaxSectorExpirationExtension, nil
{{end}}
default:
return 0, xerrors.Errorf("unsupported network version")
}
} }
func GetMinSectorExpiration() abi.ChainEpoch { func GetMinSectorExpiration() abi.ChainEpoch {
@ -240,7 +252,8 @@ func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, e
if err != nil { if err != nil {
return 0, err return 0, err
} }
return int(uint64(maxSectors) / sectorsPerPart), nil
return min(miner{{.latestVersion}}.PoStedPartitionsMax, int(uint64(maxSectors) / sectorsPerPart)), nil
} }
func GetDefaultAggregationProof() abi.RegisteredAggregationProof { func GetDefaultAggregationProof() abi.RegisteredAggregationProof {
@ -346,3 +359,10 @@ func GetSealProofFromPoStProof(postProof abi.RegisteredPoStProof) (abi.Registere
} }
return sealProof, nil return sealProof, nil
} }
func min(a, b int) int {
if a < b {
return a
}
return b
}

View File

@ -7,7 +7,6 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych" paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych"
@ -74,13 +73,3 @@ func TestPartitionSizes(t *testing.T) {
require.Equal(t, sizeOld, sizeNew) require.Equal(t, sizeOld, sizeNew)
} }
} }
func TestPoStSize(t *testing.T) {
//stm: @BLOCKCHAIN_POLICY_GET_MAX_POST_PARTITIONS_001
v12PoStSize, err := GetMaxPoStPartitions(network.Version12, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1)
require.Equal(t, 4, v12PoStSize)
require.NoError(t, err)
v13PoStSize, err := GetMaxPoStPartitions(network.Version13, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1)
require.NoError(t, err)
require.Equal(t, 10, v13PoStSize)
}

View File

@ -17,7 +17,7 @@ import (
) )
func TestPrintGroupInfo(t *testing.T) { func TestPrintGroupInfo(t *testing.T) {
server := build.DrandConfigs[build.DrandDevnet].Servers[0] server := build.DrandConfigs[build.DrandTestnet].Servers[0]
c, err := hclient.New(server, nil, nil) c, err := hclient.New(server, nil, nil)
assert.NoError(t, err) assert.NoError(t, err)
cg := c.(interface { cg := c.(interface {
@ -31,7 +31,7 @@ func TestPrintGroupInfo(t *testing.T) {
func TestMaxBeaconRoundForEpoch(t *testing.T) { func TestMaxBeaconRoundForEpoch(t *testing.T) {
todayTs := uint64(1652222222) todayTs := uint64(1652222222)
db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[build.DrandDevnet]) db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[build.DrandTestnet])
assert.NoError(t, err) assert.NoError(t, err)
mbr15 := db.MaxBeaconRoundForEpoch(network.Version15, 100) mbr15 := db.MaxBeaconRoundForEpoch(network.Version15, 100)
mbr16 := db.MaxBeaconRoundForEpoch(network.Version16, 100) mbr16 := db.MaxBeaconRoundForEpoch(network.Version16, 100)

View File

@ -147,9 +147,6 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
return xerrors.Errorf("callback failed on cron message: %w", err) return xerrors.Errorf("callback failed on cron message: %w", err)
} }
} }
if ret.ExitCode != 0 {
return xerrors.Errorf("cron exit was non-zero: %d", ret.ExitCode)
}
return nil return nil
} }

View File

@ -91,9 +91,6 @@ var RewardFunc = func(ctx context.Context, vmi vm.Interface, em stmgr.ExecMonito
} }
} }
if ret.ExitCode != 0 {
return xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr)
}
return nil return nil
} }

View File

@ -1,6 +1,7 @@
package filcns package filcns
import ( import (
"bytes"
"context" "context"
_ "embed" _ "embed"
"fmt" "fmt"
@ -19,7 +20,9 @@ import (
actorstypes "github.com/filecoin-project/go-state-types/actors" actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
nv18 "github.com/filecoin-project/go-state-types/builtin/v10/migration" nv18 "github.com/filecoin-project/go-state-types/builtin/v10/migration"
init11 "github.com/filecoin-project/go-state-types/builtin/v11/init"
nv19 "github.com/filecoin-project/go-state-types/builtin/v11/migration" nv19 "github.com/filecoin-project/go-state-types/builtin/v11/migration"
system11 "github.com/filecoin-project/go-state-types/builtin/v11/system"
nv21 "github.com/filecoin-project/go-state-types/builtin/v12/migration" nv21 "github.com/filecoin-project/go-state-types/builtin/v12/migration"
nv17 "github.com/filecoin-project/go-state-types/builtin/v9/migration" nv17 "github.com/filecoin-project/go-state-types/builtin/v9/migration"
"github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/manifest"
@ -273,6 +276,10 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule {
StopWithin: 10, StopWithin: 10,
}}, }},
Expensive: true, Expensive: true,
}, {
Height: build.UpgradeWatermelonFixHeight,
Network: network.Version21,
Migration: upgradeActorsV12Fix,
}, },
} }
@ -1869,6 +1876,8 @@ func UpgradeActorsV12(ctx context.Context, sm *stmgr.StateManager, cache stmgr.M
return newRoot, nil return newRoot, nil
} }
var calibnetv12BuggyBundle = cid.MustParse("bafy2bzacedrunxfqta5skb7q7x32lnp4efz2oq7fn226ffm7fu5iqs62jkmvs")
func upgradeActorsV12Common( func upgradeActorsV12Common(
ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet,
@ -1894,13 +1903,50 @@ func upgradeActorsV12Common(
) )
} }
manifest, ok := actors.GetManifest(actorstypes.Version12) // check whether or not this is a calibnet upgrade
if !ok { // we do this because calibnet upgraded to a "wrong" actors bundle, which was then corrected
return cid.Undef, xerrors.Errorf("no manifest CID for v12 upgrade") // we thus upgrade to calibrationnet-buggy in this upgrade
actorsIn, err := state.LoadStateTree(adtStore, root)
if err != nil {
return cid.Undef, xerrors.Errorf("loading state tree: %w", err)
}
initActor, err := actorsIn.GetActor(builtin.InitActorAddr)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to get system actor: %w", err)
}
var initState init11.State
if err := adtStore.Get(ctx, initActor.Head, &initState); err != nil {
return cid.Undef, xerrors.Errorf("failed to get system actor state: %w", err)
}
var manifestCid cid.Cid
if initState.NetworkName == "calibrationnet" {
embedded, ok := build.GetEmbeddedBuiltinActorsBundle(actorstypes.Version12, "calibrationnet-buggy")
if !ok {
return cid.Undef, xerrors.Errorf("didn't find buggy calibrationnet bundle")
}
var err error
manifestCid, err = bundle.LoadBundle(ctx, writeStore, bytes.NewReader(embedded))
if err != nil {
return cid.Undef, xerrors.Errorf("failed to load buggy calibnet bundle: %w", err)
}
if manifestCid != calibnetv12BuggyBundle {
return cid.Undef, xerrors.Errorf("didn't find expected buggy calibnet bundle manifest: %s != %s", manifestCid, calibnetv12BuggyBundle)
}
} else {
ok := false
manifestCid, ok = actors.GetManifest(actorstypes.Version12)
if !ok {
return cid.Undef, xerrors.Errorf("no manifest CID for v12 upgrade")
}
} }
// Perform the migration // Perform the migration
newHamtRoot, err := nv21.MigrateStateTree(ctx, adtStore, manifest, stateRoot.Actors, epoch, config, newHamtRoot, err := nv21.MigrateStateTree(ctx, adtStore, manifestCid, stateRoot.Actors, epoch, config,
migrationLogger{}, cache) migrationLogger{}, cache)
if err != nil { if err != nil {
return cid.Undef, xerrors.Errorf("upgrading to actors v12: %w", err) return cid.Undef, xerrors.Errorf("upgrading to actors v12: %w", err)
@ -1928,6 +1974,139 @@ func upgradeActorsV12Common(
return newRoot, nil return newRoot, nil
} }
//////////////////////
var calibnetv12BuggyMinerCID = cid.MustParse("bafk2bzacecnh2ouohmonvebq7uughh4h3ppmg4cjsk74dzxlbbtlcij4xbzxq")
func upgradeActorsV12Fix(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor,
root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
stateStore := sm.ChainStore().StateBlockstore()
adtStore := store.ActorStore(ctx, stateStore)
// ensure that the manifest is loaded in the blockstore
if err := bundle.LoadBundles(ctx, stateStore, actorstypes.Version12); err != nil {
return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err)
}
// Load input state tree
actorsIn, err := state.LoadStateTree(adtStore, root)
if err != nil {
return cid.Undef, xerrors.Errorf("loading state tree: %w", err)
}
// load old manifest data
systemActor, err := actorsIn.GetActor(builtin.SystemActorAddr)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to get system actor: %w", err)
}
var systemState system11.State
if err := adtStore.Get(ctx, systemActor.Head, &systemState); err != nil {
return cid.Undef, xerrors.Errorf("failed to get system actor state: %w", err)
}
var oldManifestData manifest.ManifestData
if err := adtStore.Get(ctx, systemState.BuiltinActors, &oldManifestData); err != nil {
return cid.Undef, xerrors.Errorf("failed to get old manifest data: %w", err)
}
newManifestCID, ok := actors.GetManifest(actorstypes.Version12)
if !ok {
return cid.Undef, xerrors.Errorf("no manifest CID for v12 upgrade")
}
// load new manifest
var newManifest manifest.Manifest
if err := adtStore.Get(ctx, newManifestCID, &newManifest); err != nil {
return cid.Undef, xerrors.Errorf("error reading actor manifest: %w", err)
}
if err := newManifest.Load(ctx, adtStore); err != nil {
return cid.Undef, xerrors.Errorf("error loading actor manifest: %w", err)
}
// build the CID mapping
codeMapping := make(map[cid.Cid]cid.Cid, len(oldManifestData.Entries))
for _, oldEntry := range oldManifestData.Entries {
newCID, ok := newManifest.Get(oldEntry.Name)
if !ok {
return cid.Undef, xerrors.Errorf("missing manifest entry for %s", oldEntry.Name)
}
// Note: we expect newCID to be the same as oldEntry.Code for all actors except the miner actor
codeMapping[oldEntry.Code] = newCID
}
// Create empty actorsOut
actorsOut, err := state.NewStateTree(adtStore, actorsIn.Version())
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create new tree: %w", err)
}
// Perform the migration
err = actorsIn.ForEach(func(a address.Address, actor *types.Actor) error {
newCid, ok := codeMapping[actor.Code]
if !ok {
return xerrors.Errorf("didn't find mapping for %s", actor.Code)
}
return actorsOut.SetActor(a, &types.ActorV5{
Code: newCid,
Head: actor.Head,
Nonce: actor.Nonce,
Balance: actor.Balance,
Address: actor.Address,
})
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to perform migration: %w", err)
}
err = actorsIn.ForEach(func(a address.Address, inActor *types.Actor) error {
outActor, err := actorsOut.GetActor(a)
if err != nil {
return xerrors.Errorf("failed to get actor in outTree: %w", err)
}
if inActor.Nonce != outActor.Nonce {
return xerrors.Errorf("mismatched nonce for actor %s", a)
}
if !inActor.Balance.Equals(outActor.Balance) {
return xerrors.Errorf("mismatched balance for actor %s: %d != %d", a, inActor.Balance, outActor.Balance)
}
if inActor.Address != outActor.Address && inActor.Address.String() != outActor.Address.String() {
return xerrors.Errorf("mismatched address for actor %s: %s != %s", a, inActor.Address, outActor.Address)
}
if inActor.Head != outActor.Head {
return xerrors.Errorf("mismatched head for actor %s", a)
}
// This is the hard-coded "buggy" miner actor Code ID
if inActor.Code != calibnetv12BuggyMinerCID && inActor.Code != outActor.Code {
return xerrors.Errorf("unexpected change in code for actor %s", a)
}
return nil
})
if err != nil {
return cid.Undef, xerrors.Errorf("failed to sanity check migration: %w", err)
}
// Persist the result.
newRoot, err := actorsOut.Flush(ctx)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err)
}
return newRoot, nil
}
////////////////////
// Example upgrade function if upgrade requires only code changes // Example upgrade function if upgrade requires only code changes
//func UpgradeActorsV9(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, _ stmgr.ExecMonitor, root cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) { //func UpgradeActorsV9(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, _ stmgr.ExecMonitor, root cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) {
// buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync()) // buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync())

View File

@ -180,7 +180,7 @@ func (e *heightEventsObserver) Revert(ctx context.Context, from, to *types.TipSe
// Update the head first so we don't accidental skip reverting a concurrent call to ChainAt. // Update the head first so we don't accidental skip reverting a concurrent call to ChainAt.
e.updateHead(to) e.updateHead(to)
// Call revert on all hights between the two tipsets, handling empty tipsets. // Call revert on all heights between the two tipsets, handling empty tipsets.
for h := from.Height(); h > to.Height(); h-- { for h := from.Height(); h > to.Height(); h-- {
e.lk.Lock() e.lk.Lock()
triggers := e.tsHeights[h] triggers := e.tsHeights[h]

View File

@ -673,7 +673,7 @@ func TestCalled(t *testing.T) {
}, 3, 20, matchAddrMethod(t0123, 5)) }, 3, 20, matchAddrMethod(t0123, 5))
require.NoError(t, err) require.NoError(t, err)
// create few blocks to make sure nothing get's randomly called // create few blocks to make sure nothing gets randomly called
fcs.advance(0, 4, 0, nil) // H=5 fcs.advance(0, 4, 0, nil) // H=5
require.Equal(t, false, applied) require.Equal(t, false, applied)
@ -991,7 +991,7 @@ func TestCalledNull(t *testing.T) {
}, 3, 20, matchAddrMethod(t0123, 5)) }, 3, 20, matchAddrMethod(t0123, 5))
require.NoError(t, err) require.NoError(t, err)
// create few blocks to make sure nothing get's randomly called // create few blocks to make sure nothing gets randomly called
fcs.advance(0, 4, 0, nil) // H=5 fcs.advance(0, 4, 0, nil) // H=5
require.Equal(t, false, applied) require.Equal(t, false, applied)
@ -1050,7 +1050,7 @@ func TestRemoveTriggersOnMessage(t *testing.T) {
}, 3, 20, matchAddrMethod(t0123, 5)) }, 3, 20, matchAddrMethod(t0123, 5))
require.NoError(t, err) require.NoError(t, err)
// create few blocks to make sure nothing get's randomly called // create few blocks to make sure nothing gets randomly called
fcs.advance(0, 4, 0, nil) // H=5 fcs.advance(0, 4, 0, nil) // H=5
require.Equal(t, false, applied) require.Equal(t, false, applied)
@ -1155,7 +1155,7 @@ func TestStateChanged(t *testing.T) {
}) })
require.NoError(t, err) require.NoError(t, err)
// create few blocks to make sure nothing get's randomly called // create few blocks to make sure nothing gets randomly called
fcs.advance(0, 4, 0, nil) // H=5 fcs.advance(0, 4, 0, nil) // H=5
require.Equal(t, false, applied) require.Equal(t, false, applied)

View File

@ -388,7 +388,7 @@ func (m *EventFilterManager) Install(ctx context.Context, minHeight, maxHeight a
if m.EventIndex != nil && minHeight != -1 && minHeight < currentHeight { if m.EventIndex != nil && minHeight != -1 && minHeight < currentHeight {
// Filter needs historic events // Filter needs historic events
if err := m.EventIndex.PrefillFilter(ctx, f); err != nil { if err := m.EventIndex.PrefillFilter(ctx, f, true); err != nil {
return nil, err return nil, err
} }
} }

View File

@ -481,7 +481,7 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever
} }
// PrefillFilter fills a filter's collection of events from the historic index // PrefillFilter fills a filter's collection of events from the historic index
func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter) error { func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter, excludeReverted bool) error {
clauses := []string{} clauses := []string{}
values := []any{} values := []any{}
joins := []string{} joins := []string{}
@ -500,6 +500,11 @@ func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter) error {
} }
} }
if excludeReverted {
clauses = append(clauses, "event.reverted=?")
values = append(values, false)
}
if len(f.addresses) > 0 { if len(f.addresses) > 0 {
subclauses := []string{} subclauses := []string{}
for _, addr := range f.addresses { for _, addr := range f.addresses {
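The new excludeReverted flag simply adds an event.reverted=false clause to the prefill query, so historic events from reverted tipsets are skipped; passing false keeps the old behaviour of returning everything. A hedged sketch of a call site inside the filter package (the wrapper is illustrative only):

import (
	"context"

	"golang.org/x/xerrors"
)

// prefillCanonicalFilter is a hypothetical wrapper: user-facing filters usually
// want only canonical history, so reverted events are excluded.
func prefillCanonicalFilter(ctx context.Context, ei *EventIndex, f *EventFilter) error {
	// true => the query gains "event.reverted=?" bound to false.
	if err := ei.PrefillFilter(ctx, f, true); err != nil {
		return xerrors.Errorf("prefilling filter: %w", err)
	}
	return nil
}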

View File

@ -272,7 +272,7 @@ func TestEventIndexPrefillFilter(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
tc := tc // appease lint tc := tc // appease lint
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
if err := ei.PrefillFilter(context.Background(), tc.filter); err != nil { if err := ei.PrefillFilter(context.Background(), tc.filter, false); err != nil {
require.NoError(t, err, "prefill filter events") require.NoError(t, err, "prefill filter events")
} }
@ -281,3 +281,619 @@ func TestEventIndexPrefillFilter(t *testing.T) {
}) })
} }
} }
func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) {
rng := pseudo.New(pseudo.NewSource(299792458))
a1 := randomF4Addr(t, rng)
a2 := randomF4Addr(t, rng)
a3 := randomF4Addr(t, rng)
a1ID := abi.ActorID(1)
a2ID := abi.ActorID(2)
addrMap := addressMap{}
addrMap.add(a1ID, a1)
addrMap.add(a2ID, a2)
ev1 := fakeEvent(
a1ID,
[]kv{
{k: "type", v: []byte("approval")},
{k: "signer", v: []byte("addr1")},
},
[]kv{
{k: "amount", v: []byte("2988181")},
},
)
ev2 := fakeEvent(
a2ID,
[]kv{
{k: "type", v: []byte("approval")},
{k: "signer", v: []byte("addr2")},
},
[]kv{
{k: "amount", v: []byte("2988182")},
},
)
st := newStore()
events := []*types.Event{ev1}
revertedEvents := []*types.Event{ev2}
em := executedMessage{
msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)),
rct: fakeReceipt(t, rng, st, events),
evs: events,
}
revertedEm := executedMessage{
msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)),
rct: fakeReceipt(t, rng, st, revertedEvents),
evs: revertedEvents,
}
events14000 := buildTipSetEvents(t, rng, 14000, em)
revertedEvents14000 := buildTipSetEvents(t, rng, 14000, revertedEm)
cid14000, err := events14000.msgTs.Key().Cid()
require.NoError(t, err, "tipset cid")
revertedCID14000, err := revertedEvents14000.msgTs.Key().Cid()
require.NoError(t, err, "tipset cid")
noCollectedEvents := []*CollectedEvent{}
oneCollectedEvent := []*CollectedEvent{
{
Entries: ev1.Entries,
EmitterAddr: a1,
EventIdx: 0,
Reverted: false,
Height: 14000,
TipSetKey: events14000.msgTs.Key(),
MsgIdx: 0,
MsgCid: em.msg.Cid(),
},
}
twoCollectedEvent := []*CollectedEvent{
{
Entries: ev1.Entries,
EmitterAddr: a1,
EventIdx: 0,
Reverted: false,
Height: 14000,
TipSetKey: events14000.msgTs.Key(),
MsgIdx: 0,
MsgCid: em.msg.Cid(),
},
{
Entries: ev2.Entries,
EmitterAddr: a2,
EventIdx: 0,
Reverted: true,
Height: 14000,
TipSetKey: revertedEvents14000.msgTs.Key(),
MsgIdx: 0,
MsgCid: revertedEm.msg.Cid(),
},
}
oneCollectedRevertedEvent := []*CollectedEvent{
{
Entries: ev2.Entries,
EmitterAddr: a2,
EventIdx: 0,
Reverted: true,
Height: 14000,
TipSetKey: revertedEvents14000.msgTs.Key(),
MsgIdx: 0,
MsgCid: revertedEm.msg.Cid(),
},
}
workDir, err := os.MkdirTemp("", "lotusevents")
require.NoError(t, err, "create temporary work directory")
defer func() {
_ = os.RemoveAll(workDir)
}()
t.Logf("using work dir %q", workDir)
dbPath := filepath.Join(workDir, "actorevents.db")
ei, err := NewEventIndex(context.Background(), dbPath, nil)
require.NoError(t, err, "create event index")
if err := ei.CollectEvents(context.Background(), revertedEvents14000, false, addrMap.ResolveAddress); err != nil {
require.NoError(t, err, "collect reverted events")
}
if err := ei.CollectEvents(context.Background(), revertedEvents14000, true, addrMap.ResolveAddress); err != nil {
require.NoError(t, err, "revert reverted events")
}
if err := ei.CollectEvents(context.Background(), events14000, false, addrMap.ResolveAddress); err != nil {
require.NoError(t, err, "collect events")
}
inclusiveTestCases := []struct {
name string
filter *EventFilter
te *TipSetEvents
want []*CollectedEvent
}{
{
name: "nomatch tipset min height",
filter: &EventFilter{
minHeight: 14001,
maxHeight: -1,
},
te: events14000,
want: noCollectedEvents,
},
{
name: "nomatch tipset max height",
filter: &EventFilter{
minHeight: -1,
maxHeight: 13999,
},
te: events14000,
want: noCollectedEvents,
},
{
name: "match tipset min height",
filter: &EventFilter{
minHeight: 14000,
maxHeight: -1,
},
te: events14000,
want: twoCollectedEvent,
},
{
name: "match tipset cid",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
tipsetCid: cid14000,
},
te: events14000,
want: oneCollectedEvent,
},
{
name: "match tipset cid",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
tipsetCid: revertedCID14000,
},
te: revertedEvents14000,
want: oneCollectedRevertedEvent,
},
{
name: "nomatch address",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
addresses: []address.Address{a3},
},
te: events14000,
want: noCollectedEvents,
},
{
name: "match address 2",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
addresses: []address.Address{a2},
},
te: revertedEvents14000,
want: oneCollectedRevertedEvent,
},
{
name: "match address 1",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
addresses: []address.Address{a1},
},
te: events14000,
want: oneCollectedEvent,
},
{
name: "match one entry",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"type": {
[]byte("approval"),
},
},
},
te: events14000,
want: twoCollectedEvent,
},
{
name: "match one entry with alternate values",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"type": {
[]byte("cancel"),
[]byte("propose"),
[]byte("approval"),
},
},
},
te: events14000,
want: twoCollectedEvent,
},
{
name: "nomatch one entry by missing value",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"type": {
[]byte("cancel"),
[]byte("propose"),
},
},
},
te: events14000,
want: noCollectedEvents,
},
{
name: "nomatch one entry by missing key",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"method": {
[]byte("approval"),
},
},
},
te: events14000,
want: noCollectedEvents,
},
{
name: "match one entry with multiple keys",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"type": {
[]byte("approval"),
},
"signer": {
[]byte("addr1"),
},
},
},
te: events14000,
want: oneCollectedEvent,
},
{
name: "match one entry with multiple keys",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"type": {
[]byte("approval"),
},
"signer": {
[]byte("addr2"),
},
},
},
te: revertedEvents14000,
want: oneCollectedRevertedEvent,
},
{
name: "nomatch one entry with one mismatching key",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"type": {
[]byte("approval"),
},
"approver": {
[]byte("addr1"),
},
},
},
te: events14000,
want: noCollectedEvents,
},
{
name: "nomatch one entry with one mismatching value",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"type": {
[]byte("approval"),
},
"signer": {
[]byte("addr3"),
},
},
},
te: events14000,
want: noCollectedEvents,
},
{
name: "nomatch one entry with one unindexed key",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"amount": {
[]byte("2988181"),
},
},
},
te: events14000,
want: noCollectedEvents,
},
{
name: "nomatch one entry with one unindexed key",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"amount": {
[]byte("2988182"),
},
},
},
te: events14000,
want: noCollectedEvents,
},
}
exclusiveTestCases := []struct {
name string
filter *EventFilter
te *TipSetEvents
want []*CollectedEvent
}{
{
name: "nomatch tipset min height",
filter: &EventFilter{
minHeight: 14001,
maxHeight: -1,
},
te: events14000,
want: noCollectedEvents,
},
{
name: "nomatch tipset max height",
filter: &EventFilter{
minHeight: -1,
maxHeight: 13999,
},
te: events14000,
want: noCollectedEvents,
},
{
name: "match tipset min height",
filter: &EventFilter{
minHeight: 14000,
maxHeight: -1,
},
te: events14000,
want: oneCollectedEvent,
},
{
name: "match tipset cid",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
tipsetCid: cid14000,
},
te: events14000,
want: oneCollectedEvent,
},
{
name: "match tipset cid but reverted",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
tipsetCid: revertedCID14000,
},
te: revertedEvents14000,
want: noCollectedEvents,
},
{
name: "nomatch address",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
addresses: []address.Address{a3},
},
te: events14000,
want: noCollectedEvents,
},
{
name: "nomatch address 2 but reverted",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
addresses: []address.Address{a2},
},
te: revertedEvents14000,
want: noCollectedEvents,
},
{
name: "match address",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
addresses: []address.Address{a1},
},
te: events14000,
want: oneCollectedEvent,
},
{
name: "match one entry",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"type": {
[]byte("approval"),
},
},
},
te: events14000,
want: oneCollectedEvent,
},
{
name: "match one entry with alternate values",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"type": {
[]byte("cancel"),
[]byte("propose"),
[]byte("approval"),
},
},
},
te: events14000,
want: oneCollectedEvent,
},
{
name: "nomatch one entry by missing value",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"type": {
[]byte("cancel"),
[]byte("propose"),
},
},
},
te: events14000,
want: noCollectedEvents,
},
{
name: "nomatch one entry by missing key",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"method": {
[]byte("approval"),
},
},
},
te: events14000,
want: noCollectedEvents,
},
{
name: "match one entry with multiple keys",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"type": {
[]byte("approval"),
},
"signer": {
[]byte("addr1"),
},
},
},
te: events14000,
want: oneCollectedEvent,
},
{
name: "nomatch one entry with one mismatching key",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"type": {
[]byte("approval"),
},
"approver": {
[]byte("addr1"),
},
},
},
te: events14000,
want: noCollectedEvents,
},
{
name: "nomatch one entry with matching reverted value",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"type": {
[]byte("approval"),
},
"signer": {
[]byte("addr2"),
},
},
},
te: events14000,
want: noCollectedEvents,
},
{
name: "nomatch one entry with one mismatching value",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"type": {
[]byte("approval"),
},
"signer": {
[]byte("addr3"),
},
},
},
te: events14000,
want: noCollectedEvents,
},
{
name: "nomatch one entry with one unindexed key",
filter: &EventFilter{
minHeight: -1,
maxHeight: -1,
keys: map[string][][]byte{
"amount": {
[]byte("2988181"),
},
},
},
te: events14000,
want: noCollectedEvents,
},
}
for _, tc := range inclusiveTestCases {
tc := tc // appease lint
t.Run(tc.name, func(t *testing.T) {
if err := ei.PrefillFilter(context.Background(), tc.filter, false); err != nil {
require.NoError(t, err, "prefill filter events")
}
coll := tc.filter.TakeCollectedEvents(context.Background())
require.ElementsMatch(t, coll, tc.want, tc.name)
})
}
for _, tc := range exclusiveTestCases {
tc := tc // appease lint
t.Run(tc.name, func(t *testing.T) {
if err := ei.PrefillFilter(context.Background(), tc.filter, true); err != nil {
require.NoError(t, err, "prefill filter events")
}
coll := tc.filter.TakeCollectedEvents(context.Background())
require.ElementsMatch(t, coll, tc.want, tc.name)
})
}
}

View File

@ -43,9 +43,11 @@ func (t *Request) MarshalCBOR(w io.Writer) error {
return err return err
} }
for _, v := range t.Head { for _, v := range t.Head {
if err := cbg.WriteCid(w, v); err != nil {
return xerrors.Errorf("failed writing cid field t.Head: %w", err) if err := cbg.WriteCid(cw, v); err != nil {
return xerrors.Errorf("failed to write cid field v: %w", err)
} }
} }
// t.Length (uint64) (uint64) // t.Length (uint64) (uint64)
@ -106,12 +108,25 @@ func (t *Request) UnmarshalCBOR(r io.Reader) (err error) {
} }
for i := 0; i < int(extra); i++ { for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
c, err := cbg.ReadCid(cr) {
if err != nil {
return xerrors.Errorf("reading cid field t.Head failed: %w", err) c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.Head[i]: %w", err)
}
t.Head[i] = c
}
} }
t.Head[i] = c
} }
// t.Length (uint64) (uint64) // t.Length (uint64) (uint64)
@ -173,7 +188,7 @@ func (t *Response) MarshalCBOR(w io.Writer) error {
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ErrorMessage))); err != nil { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ErrorMessage))); err != nil {
return err return err
} }
if _, err := io.WriteString(w, string(t.ErrorMessage)); err != nil { if _, err := cw.WriteString(string(t.ErrorMessage)); err != nil {
return err return err
} }
@ -260,13 +275,32 @@ func (t *Response) UnmarshalCBOR(r io.Reader) (err error) {
} }
for i := 0; i < int(extra); i++ { for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
var v BSTipSet {
if err := v.UnmarshalCBOR(cr); err != nil {
return err b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.Chain[i] = new(BSTipSet)
if err := t.Chain[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Chain[i] pointer: %w", err)
}
}
}
} }
t.Chain[i] = &v
} }
return nil return nil
@ -317,9 +351,11 @@ func (t *CompactedMessages) MarshalCBOR(w io.Writer) error {
return err return err
} }
for _, v := range v { for _, v := range v {
if err := cw.CborWriteHeader(cbg.MajUnsignedInt, uint64(v)); err != nil {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(v)); err != nil {
return err return err
} }
} }
} }
@ -354,9 +390,11 @@ func (t *CompactedMessages) MarshalCBOR(w io.Writer) error {
return err return err
} }
for _, v := range v { for _, v := range v {
if err := cw.CborWriteHeader(cbg.MajUnsignedInt, uint64(v)); err != nil {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(v)); err != nil {
return err return err
} }
} }
} }
return nil return nil
@ -405,13 +443,32 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
} }
for i := 0; i < int(extra); i++ { for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
var v types.Message {
if err := v.UnmarshalCBOR(cr); err != nil {
return err b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.Bls[i] = new(types.Message)
if err := t.Bls[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Bls[i] pointer: %w", err)
}
}
}
} }
t.Bls[i] = &v
} }
// t.BlsIncludes ([][]uint64) (slice) // t.BlsIncludes ([][]uint64) (slice)
@ -438,6 +495,9 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
var maj byte var maj byte
var extra uint64 var extra uint64
var err error var err error
_ = maj
_ = extra
_ = err
maj, extra, err = cr.ReadHeader() maj, extra, err = cr.ReadHeader()
if err != nil { if err != nil {
@ -457,17 +517,27 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
} }
for j := 0; j < int(extra); j++ { for j := 0; j < int(extra); j++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
maj, val, err := cr.ReadHeader() {
if err != nil {
return xerrors.Errorf("failed to read uint64 for t.BlsIncludes[i] slice: %w", err) maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.BlsIncludes[i][j] = uint64(extra)
}
} }
if maj != cbg.MajUnsignedInt {
return xerrors.Errorf("value read for array t.BlsIncludes[i] was not a uint, instead got %d", maj)
}
t.BlsIncludes[i][j] = uint64(val)
} }
} }
@ -493,13 +563,32 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
} }
for i := 0; i < int(extra); i++ { for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
var v types.SignedMessage {
if err := v.UnmarshalCBOR(cr); err != nil {
return err b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.Secpk[i] = new(types.SignedMessage)
if err := t.Secpk[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Secpk[i] pointer: %w", err)
}
}
}
} }
t.Secpk[i] = &v
} }
// t.SecpkIncludes ([][]uint64) (slice) // t.SecpkIncludes ([][]uint64) (slice)
@ -526,6 +615,9 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
var maj byte var maj byte
var extra uint64 var extra uint64
var err error var err error
_ = maj
_ = extra
_ = err
maj, extra, err = cr.ReadHeader() maj, extra, err = cr.ReadHeader()
if err != nil { if err != nil {
@ -545,17 +637,27 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) {
} }
for j := 0; j < int(extra); j++ { for j := 0; j < int(extra); j++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
maj, val, err := cr.ReadHeader() {
if err != nil {
return xerrors.Errorf("failed to read uint64 for t.SecpkIncludes[i] slice: %w", err) maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.SecpkIncludes[i][j] = uint64(extra)
}
} }
if maj != cbg.MajUnsignedInt {
return xerrors.Errorf("value read for array t.SecpkIncludes[i] was not a uint, instead got %d", maj)
}
t.SecpkIncludes[i][j] = uint64(val)
} }
} }
@ -642,13 +744,32 @@ func (t *BSTipSet) UnmarshalCBOR(r io.Reader) (err error) {
} }
for i := 0; i < int(extra); i++ { for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
var v types.BlockHeader {
if err := v.UnmarshalCBOR(cr); err != nil {
return err b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.Blocks[i] = new(types.BlockHeader)
if err := t.Blocks[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Blocks[i] pointer: %w", err)
}
}
}
} }
t.Blocks[i] = &v
} }
// t.Messages (exchange.CompactedMessages) (struct) // t.Messages (exchange.CompactedMessages) (struct)

View File

@ -242,7 +242,7 @@ func (c *client) processResponse(req *Request, res *Response, tipsets []*types.T
// If we didn't request the headers they should have been provided // If we didn't request the headers they should have been provided
// by the caller. // by the caller.
if len(tipsets) < len(res.Chain) { if len(tipsets) < len(res.Chain) {
return nil, xerrors.Errorf("not enought tipsets provided for message response validation, needed %d, have %d", len(res.Chain), len(tipsets)) return nil, xerrors.Errorf("not enough tipsets provided for message response validation, needed %d, have %d", len(res.Chain), len(tipsets))
} }
chain := make([]*BSTipSet, 0, resLength) chain := make([]*BSTipSet, 0, resLength)
for i, resChain := range res.Chain { for i, resChain := range res.Chain {
@ -381,7 +381,7 @@ func (c *client) sendRequestToPeer(ctx context.Context, peer peer.ID, req *Reque
defer span.End() defer span.End()
if span.IsRecordingEvents() { if span.IsRecordingEvents() {
span.AddAttributes( span.AddAttributes(
trace.StringAttribute("peer", peer.Pretty()), trace.StringAttribute("peer", peer.String()),
) )
} }
defer func() { defer func() {

View File

@ -578,7 +578,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto
} }
// setup Storage Miners // setup Storage Miners
stateroot, err = SetupStorageMiners(ctx, cs, sys, stateroot, template.Miners, template.NetworkVersion) stateroot, err = SetupStorageMiners(ctx, cs, sys, stateroot, template.Miners, template.NetworkVersion, false)
if err != nil { if err != nil {
return nil, xerrors.Errorf("setup miners failed: %w", err) return nil, xerrors.Errorf("setup miners failed: %w", err)
} }

View File

@ -79,7 +79,7 @@ func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder {
} }
// Note: Much of this is brittle, if the methodNum / param / return changes, it will break things // Note: Much of this is brittle, if the methodNum / param / return changes, it will break things
func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.SyscallBuilder, sroot cid.Cid, miners []genesis.Miner, nv network.Version) (cid.Cid, error) { func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.SyscallBuilder, sroot cid.Cid, miners []genesis.Miner, nv network.Version, synthetic bool) (cid.Cid, error) {
cst := cbor.NewCborStore(cs.StateBlockstore()) cst := cbor.NewCborStore(cs.StateBlockstore())
av, err := actorstypes.VersionForNetwork(nv) av, err := actorstypes.VersionForNetwork(nv)
@ -125,14 +125,18 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
sectorWeight []abi.StoragePower sectorWeight []abi.StoragePower
}, len(miners)) }, len(miners))
maxPeriods := policy.GetMaxSectorExpirationExtension() / minertypes.WPoStProvingPeriod maxLifetime, err := policy.GetMaxSectorExpirationExtension(nv)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to get max extension: %w", err)
}
maxPeriods := maxLifetime / minertypes.WPoStProvingPeriod
rawPow, qaPow := big.NewInt(0), big.NewInt(0) rawPow, qaPow := big.NewInt(0), big.NewInt(0)
for i, m := range miners { for i, m := range miners {
// Create miner through power actor // Create miner through power actor
i := i i := i
m := m m := m
spt, err := miner.SealProofTypeFromSectorSize(m.SectorSize, nv) spt, err := miner.SealProofTypeFromSectorSize(m.SectorSize, nv, synthetic)
if err != nil { if err != nil {
return cid.Undef, err return cid.Undef, err
} }

View File

@ -131,7 +131,7 @@ func NewMsgIndex(lctx context.Context, basePath string, cs ChainStore) (MsgIndex
db, err := sql.Open("sqlite3", dbPath) db, err := sql.Open("sqlite3", dbPath)
if err != nil { if err != nil {
// TODO [nice to have]: automaticaly delete corrupt databases // TODO [nice to have]: automatically delete corrupt databases
// but for now we can just error and let the operator delete. // but for now we can just error and let the operator delete.
return nil, xerrors.Errorf("error opening msgindex database: %w", err) return nil, xerrors.Errorf("error opening msgindex database: %w", err)
} }

View File

@ -21,6 +21,7 @@ import (
pubsub "github.com/libp2p/go-libp2p-pubsub" pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/minio/blake2b-simd" "github.com/minio/blake2b-simd"
"github.com/raulk/clock" "github.com/raulk/clock"
"go.opencensus.io/stats"
"golang.org/x/xerrors" "golang.org/x/xerrors"
ffi "github.com/filecoin-project/filecoin-ffi" ffi "github.com/filecoin-project/filecoin-ffi"
@ -210,8 +211,10 @@ func ComputeRBF(curPrem abi.TokenAmount, replaceByFeeRatio types.Percent) abi.To
func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSpec *api.MessageSendSpec) { func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSpec *api.MessageSendSpec) {
var maxFee abi.TokenAmount var maxFee abi.TokenAmount
var maximizeFeeCap bool
if sendSpec != nil { if sendSpec != nil {
maxFee = sendSpec.MaxFee maxFee = sendSpec.MaxFee
maximizeFeeCap = sendSpec.MaximizeFeeCap
} }
if maxFee.Int == nil || maxFee.Equals(big.Zero()) { if maxFee.Int == nil || maxFee.Equals(big.Zero()) {
mf, err := mff() mf, err := mff()
@ -222,15 +225,12 @@ func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSpec *api.M
maxFee = mf maxFee = mf
} }
gl := types.NewInt(uint64(msg.GasLimit)) gaslimit := types.NewInt(uint64(msg.GasLimit))
totalFee := types.BigMul(msg.GasFeeCap, gl) totalFee := types.BigMul(msg.GasFeeCap, gaslimit)
if maximizeFeeCap || totalFee.GreaterThan(maxFee) {
if totalFee.LessThanEqual(maxFee) { msg.GasFeeCap = big.Div(maxFee, gaslimit)
msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap
return
} }
msg.GasFeeCap = big.Div(maxFee, gl)
msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap
} }
@ -1022,6 +1022,9 @@ func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, st
} }
}) })
// Record the current size of the Mpool
stats.Record(ctx, metrics.MpoolMessageCount.M(int64(mp.currentSize)))
return nil return nil
} }
@ -1214,6 +1217,9 @@ func (mp *MessagePool) remove(ctx context.Context, from address.Address, nonce u
return return
} }
} }
// Record the current size of the Mpool
stats.Record(ctx, metrics.MpoolMessageCount.M(int64(mp.currentSize)))
} }
func (mp *MessagePool) Pending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) { func (mp *MessagePool) Pending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) {
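A small worked sketch of the reworked fee cap (numbers are invented): when MaximizeFeeCap is set on the send spec, or when GasFeeCap*GasLimit would exceed the max fee, the cap is recomputed as MaxFee/GasLimit and the premium is clamped to it.

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"
)

func main() {
	gasLimit := big.NewInt(10_000_000)
	maxFee := big.NewInt(1_000_000_000_000) // sendSpec.MaxFee, in attoFIL
	oldPremium := big.NewInt(150_000)

	// MaximizeFeeCap (or an over-budget total fee) forces the cap down to the
	// per-gas-unit budget; the premium can never exceed the cap.
	feeCap := big.Div(maxFee, gasLimit)    // 100_000 attoFIL per gas unit
	premium := big.Min(feeCap, oldPremium) // stays within the cap: 100_000

	fmt.Println(feeCap, premium)
}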

View File

@ -571,7 +571,7 @@ func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error
} }
// no need to record anything here, there are no duplicates in the actors HAMT // no need to record anything here, there are no duplicates in the actors HAMT
// iself. // itself.
if _, ok := seen[addr]; ok { if _, ok := seen[addr]; ok {
return nil return nil
} }
@ -589,7 +589,7 @@ func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error
} }
// no need to record anything here, there are no duplicates in the actors HAMT // no need to record anything here, there are no duplicates in the actors HAMT
// iself. // itself.
if _, ok := seen[addr]; ok { if _, ok := seen[addr]; ok {
return nil return nil
} }

View File

@ -212,13 +212,8 @@ func (cs *ChainStore) MessagesForTipset(ctx context.Context, ts *types.TipSet) (
var out []types.ChainMsg var out []types.ChainMsg
for _, bm := range bmsgs { for _, bm := range bmsgs {
for _, blsm := range bm.BlsMessages { out = append(out, bm.BlsMessages...)
out = append(out, blsm) out = append(out, bm.SecpkMessages...)
}
for _, secm := range bm.SecpkMessages {
out = append(out, secm)
}
} }
return out, nil return out, nil

View File

@ -167,7 +167,7 @@ func (t walkSchedTaskType) String() string {
case dagTask: case dagTask:
return "dag" return "dag"
} }
panic(fmt.Sprintf("unknow task %d", t)) panic(fmt.Sprintf("unknown task %d", t))
} }
type walkTask struct { type walkTask struct {
@ -656,9 +656,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe
} }
if b.Height > 0 { if b.Height > 0 {
for _, p := range b.Parents { blocksToWalk = append(blocksToWalk, b.Parents...)
blocksToWalk = append(blocksToWalk, p)
}
} else { } else {
// include the genesis block // include the genesis block
cids = append(cids, b.Parents...) cids = append(cids, b.Parents...)

View File

@ -516,7 +516,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
return pubsub.ValidationReject return pubsub.ValidationReject
} }
if len(idxrMsg.ExtraData) == 0 { if len(idxrMsg.ExtraData) == 0 {
log.Debugw("ignoring messsage missing miner id", "peer", originPeer) log.Debugw("ignoring message missing miner id", "peer", originPeer)
return pubsub.ValidationIgnore return pubsub.ValidationIgnore
} }
@ -552,7 +552,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
// Check that the miner ID maps to the peer that sent the message. // Check that the miner ID maps to the peer that sent the message.
err = v.authenticateMessage(ctx, minerAddr, originPeer) err = v.authenticateMessage(ctx, minerAddr, originPeer)
if err != nil { if err != nil {
log.Warnw("cannot authenticate messsage", "err", err, "peer", originPeer, "minerID", minerAddr) log.Warnw("cannot authenticate message", "err", err, "peer", originPeer, "minerID", minerAddr)
stats.Record(ctx, metrics.IndexerMessageValidationFailure.M(1)) stats.Record(ctx, metrics.IndexerMessageValidationFailure.M(1))
return pubsub.ValidationReject return pubsub.ValidationReject
} }

View File

@ -844,7 +844,7 @@ loop:
return nil, xerrors.Errorf("failed to load next local tipset: %w", err) return nil, xerrors.Errorf("failed to load next local tipset: %w", err)
} }
if base.IsChildOf(knownParent) { if base.IsChildOf(knownParent) {
// common case: receiving a block thats potentially part of the same tipset as our best block // common case: receiving a block that's potentially part of the same tipset as our best block
return blockSet, nil return blockSet, nil
} }
@ -1094,8 +1094,8 @@ func (syncer *Syncer) fetchMessages(ctx context.Context, headers []*types.TipSet
requestErr = multierror.Append(requestErr, err) requestErr = multierror.Append(requestErr, err)
} else { } else {
isGood := true isGood := true
for index, ts := range headers[nextI:lastI] { for index, cm := range result {
cm := result[index] ts := headers[nextI+index]
if err := checkMsgMeta(ts, cm.Bls, cm.Secpk, cm.BlsIncludes, cm.SecpkIncludes); err != nil { if err := checkMsgMeta(ts, cm.Bls, cm.Secpk, cm.BlsIncludes, cm.SecpkIncludes); err != nil {
log.Errorf("fetched messages not as expected: %s", err) log.Errorf("fetched messages not as expected: %s", err)
isGood = false isGood = false

View File

@ -91,9 +91,11 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error {
return err return err
} }
for _, v := range t.Parents { for _, v := range t.Parents {
if err := cbg.WriteCid(w, v); err != nil {
return xerrors.Errorf("failed writing cid field t.Parents: %w", err) if err := cbg.WriteCid(cw, v); err != nil {
return xerrors.Errorf("failed to write cid field v: %w", err)
} }
} }
// t.ParentWeight (big.Int) (struct) // t.ParentWeight (big.Int) (struct)
@ -249,13 +251,22 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) {
} }
for i := 0; i < int(extra); i++ { for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
var v BeaconEntry {
if err := v.UnmarshalCBOR(cr); err != nil {
return err if err := t.BeaconEntries[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.BeaconEntries[i]: %w", err)
}
}
} }
t.BeaconEntries[i] = v
} }
// t.WinPoStProof ([]proof.PoStProof) (slice) // t.WinPoStProof ([]proof.PoStProof) (slice)
@ -278,13 +289,22 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) {
} }
for i := 0; i < int(extra); i++ { for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
var v proof.PoStProof {
if err := v.UnmarshalCBOR(cr); err != nil {
return err if err := t.WinPoStProof[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.WinPoStProof[i]: %w", err)
}
}
} }
t.WinPoStProof[i] = v
} }
// t.Parents ([]cid.Cid) (slice) // t.Parents ([]cid.Cid) (slice)
@ -307,12 +327,25 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) {
} }
for i := 0; i < int(extra); i++ { for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
c, err := cbg.ReadCid(cr) {
if err != nil {
return xerrors.Errorf("reading cid field t.Parents failed: %w", err) c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.Parents[i]: %w", err)
}
t.Parents[i] = c
}
} }
t.Parents[i] = c
} }
// t.ParentWeight (big.Int) (struct) // t.ParentWeight (big.Int) (struct)
@ -1318,9 +1351,11 @@ func (t *BlockMsg) MarshalCBOR(w io.Writer) error {
return err return err
} }
for _, v := range t.BlsMessages { for _, v := range t.BlsMessages {
if err := cbg.WriteCid(w, v); err != nil {
return xerrors.Errorf("failed writing cid field t.BlsMessages: %w", err) if err := cbg.WriteCid(cw, v); err != nil {
return xerrors.Errorf("failed to write cid field v: %w", err)
} }
} }
// t.SecpkMessages ([]cid.Cid) (slice) // t.SecpkMessages ([]cid.Cid) (slice)
@ -1332,9 +1367,11 @@ func (t *BlockMsg) MarshalCBOR(w io.Writer) error {
return err return err
} }
for _, v := range t.SecpkMessages { for _, v := range t.SecpkMessages {
if err := cbg.WriteCid(w, v); err != nil {
return xerrors.Errorf("failed writing cid field t.SecpkMessages: %w", err) if err := cbg.WriteCid(cw, v); err != nil {
return xerrors.Errorf("failed to write cid field v: %w", err)
} }
} }
return nil return nil
} }
@ -1401,12 +1438,25 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) {
} }
for i := 0; i < int(extra); i++ { for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
c, err := cbg.ReadCid(cr) {
if err != nil {
return xerrors.Errorf("reading cid field t.BlsMessages failed: %w", err) c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.BlsMessages[i]: %w", err)
}
t.BlsMessages[i] = c
}
} }
t.BlsMessages[i] = c
} }
// t.SecpkMessages ([]cid.Cid) (slice) // t.SecpkMessages ([]cid.Cid) (slice)
@ -1429,12 +1479,25 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) {
} }
for i := 0; i < int(extra); i++ { for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
c, err := cbg.ReadCid(cr) {
if err != nil {
return xerrors.Errorf("reading cid field t.SecpkMessages failed: %w", err) c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.SecpkMessages[i]: %w", err)
}
t.SecpkMessages[i] = c
}
} }
t.SecpkMessages[i] = c
} }
return nil return nil
@ -1463,9 +1526,11 @@ func (t *ExpTipSet) MarshalCBOR(w io.Writer) error {
return err return err
} }
for _, v := range t.Cids { for _, v := range t.Cids {
if err := cbg.WriteCid(w, v); err != nil {
return xerrors.Errorf("failed writing cid field t.Cids: %w", err) if err := cbg.WriteCid(cw, v); err != nil {
return xerrors.Errorf("failed to write cid field v: %w", err)
} }
} }
// t.Blocks ([]*types.BlockHeader) (slice) // t.Blocks ([]*types.BlockHeader) (slice)
@ -1538,12 +1603,25 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) {
} }
for i := 0; i < int(extra); i++ { for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
c, err := cbg.ReadCid(cr) {
if err != nil {
return xerrors.Errorf("reading cid field t.Cids failed: %w", err) c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.Cids[i]: %w", err)
}
t.Cids[i] = c
}
} }
t.Cids[i] = c
} }
// t.Blocks ([]*types.BlockHeader) (slice) // t.Blocks ([]*types.BlockHeader) (slice)
@ -1566,13 +1644,32 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) {
} }
for i := 0; i < int(extra); i++ { for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
var v BlockHeader {
if err := v.UnmarshalCBOR(cr); err != nil {
return err b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.Blocks[i] = new(BlockHeader)
if err := t.Blocks[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Blocks[i] pointer: %w", err)
}
}
}
} }
t.Blocks[i] = &v
} }
// t.Height (abi.ChainEpoch) (int64) // t.Height (abi.ChainEpoch) (int64)
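For pointer-typed elements (t.Blocks[i] here, t.GasCharges[i] below) the regenerated decoder first peeks one byte: a CBOR null (0xf6) leaves the slot nil, anything else is unread and decoded into a freshly allocated value. A standalone sketch of that peek-then-unread shape, using bufio in place of the cbor-gen reader:

package main

import (
	"bufio"
	"bytes"
	"fmt"
)

const cborNull = 0xf6 // CBOR simple value "null"; matches cbg.CborNull[0]

// decodeNullable peeks one byte: null keeps the nil slot, anything else is
// pushed back and handed to the element decoder.
func decodeNullable(br *bufio.Reader, decode func(*bufio.Reader) error) (bool, error) {
	b, err := br.ReadByte()
	if err != nil {
		return false, err
	}
	if b == cborNull {
		return false, nil // keep the nil pointer
	}
	if err := br.UnreadByte(); err != nil {
		return false, err
	}
	return true, decode(br)
}

func main() {
	// 0xf6 = null, 0x01 = the unsigned integer 1 (stands in for a real element)
	br := bufio.NewReader(bytes.NewReader([]byte{0xf6, 0x01}))
	for i := 0; i < 2; i++ {
		ok, err := decodeNullable(br, func(r *bufio.Reader) error {
			v, err := r.ReadByte()
			fmt.Printf("decoded element %#x\n", v)
			return err
		})
		if err != nil {
			panic(err)
		}
		if !ok {
			fmt.Println("null element, left as nil")
		}
	}
}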
@ -1933,13 +2030,22 @@ func (t *Event) UnmarshalCBOR(r io.Reader) (err error) {
} }
for i := 0; i < int(extra); i++ { for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
var v EventEntry {
if err := v.UnmarshalCBOR(cr); err != nil {
return err if err := t.Entries[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Entries[i]: %w", err)
}
}
} }
t.Entries[i] = v
} }
return nil return nil
@ -1972,7 +2078,7 @@ func (t *EventEntry) MarshalCBOR(w io.Writer) error {
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Key))); err != nil { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Key))); err != nil {
return err return err
} }
if _, err := io.WriteString(w, string(t.Key)); err != nil { if _, err := cw.WriteString(string(t.Key)); err != nil {
return err return err
} }
@ -2103,7 +2209,7 @@ func (t *GasTrace) MarshalCBOR(w io.Writer) error {
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil {
return err return err
} }
if _, err := io.WriteString(w, string(t.Name)); err != nil { if _, err := cw.WriteString(string(t.Name)); err != nil {
return err return err
} }
@ -2756,13 +2862,32 @@ func (t *ExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) {
} }
for i := 0; i < int(extra); i++ { for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
var v GasTrace {
if err := v.UnmarshalCBOR(cr); err != nil {
return err b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.GasCharges[i] = new(GasTrace)
if err := t.GasCharges[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.GasCharges[i] pointer: %w", err)
}
}
}
} }
t.GasCharges[i] = &v
} }
// t.Subcalls ([]types.ExecutionTrace) (slice) // t.Subcalls ([]types.ExecutionTrace) (slice)
@ -2785,13 +2910,22 @@ func (t *ExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) {
} }
for i := 0; i < int(extra); i++ { for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
var v ExecutionTrace {
if err := v.UnmarshalCBOR(cr); err != nil {
return err if err := t.Subcalls[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Subcalls[i]: %w", err)
}
}
} }
t.Subcalls[i] = v
} }
return nil return nil

View File

@ -140,7 +140,7 @@ func (t *messageReceiptV0) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt: case cbg.MajNegativeInt:
extraI = int64(extra) extraI = int64(extra)
if extraI < 0 { if extraI < 0 {
return fmt.Errorf("int64 negative oveflow") return fmt.Errorf("int64 negative overflow")
} }
extraI = -1 - extraI extraI = -1 - extraI
default: default:
@ -186,7 +186,7 @@ func (t *messageReceiptV0) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt: case cbg.MajNegativeInt:
extraI = int64(extra) extraI = int64(extra)
if extraI < 0 { if extraI < 0 {
return fmt.Errorf("int64 negative oveflow") return fmt.Errorf("int64 negative overflow")
} }
extraI = -1 - extraI extraI = -1 - extraI
default: default:
@ -278,7 +278,7 @@ func (t *messageReceiptV1) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt: case cbg.MajNegativeInt:
extraI = int64(extra) extraI = int64(extra)
if extraI < 0 { if extraI < 0 {
return fmt.Errorf("int64 negative oveflow") return fmt.Errorf("int64 negative overflow")
} }
extraI = -1 - extraI extraI = -1 - extraI
default: default:
@ -324,7 +324,7 @@ func (t *messageReceiptV1) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt: case cbg.MajNegativeInt:
extraI = int64(extra) extraI = int64(extra)
if extraI < 0 { if extraI < 0 {
return fmt.Errorf("int64 negative oveflow") return fmt.Errorf("int64 negative overflow")
} }
extraI = -1 - extraI extraI = -1 - extraI
default: default:
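These hunks only correct the "oveflow" typo in the error string; the surrounding logic is the standard CBOR major-type-1 decode, where an encoded argument n represents the value -1 - n and the guard rejects arguments that no longer fit in int64. A compact sketch of that rule:

package main

import "fmt"

// decodeNegative mirrors the hunks above: CBOR major type 1 encodes a negative
// integer as the argument n, with the decoded value being -1 - n (0 -> -1, 9 -> -10).
// The overflow guard rejects arguments whose int64 conversion went negative.
func decodeNegative(extra uint64) (int64, error) {
	extraI := int64(extra)
	if extraI < 0 {
		return 0, fmt.Errorf("int64 negative overflow")
	}
	return -1 - extraI, nil
}

func main() {
	v, _ := decodeNegative(9)
	fmt.Println(v) // -10
}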

View File

@ -282,7 +282,7 @@ var NetDisconnect = &cli.Command{
fmt.Println("failure") fmt.Println("failure")
return err return err
} }
fmt.Printf("disconnect %s: ", pid.Pretty()) fmt.Printf("disconnect %s: ", pid)
err = api.NetDisconnect(ctx, pid) err = api.NetDisconnect(ctx, pid)
if err != nil { if err != nil {
fmt.Println("failure") fmt.Println("failure")
@ -312,7 +312,7 @@ var NetConnect = &cli.Command{
} }
for _, pi := range pis { for _, pi := range pis {
fmt.Printf("connect %s: ", pi.ID.Pretty()) fmt.Printf("connect %s: ", pi.ID)
err := api.NetConnect(ctx, pi) err := api.NetConnect(ctx, pi)
if err != nil { if err != nil {
fmt.Println("failure") fmt.Println("failure")

View File

@ -1920,8 +1920,29 @@ var StateSysActorCIDsCmd = &cli.Command{
if err != nil { if err != nil {
return err return err
} }
for name, cid := range actorsCids {
_, _ = fmt.Fprintf(tw, "%v\t%v\n", name, cid) var actorsCidTuples []struct {
actorName string
actorCid cid.Cid
}
for name, actorCid := range actorsCids {
keyVal := struct {
actorName string
actorCid cid.Cid
}{
actorName: name,
actorCid: actorCid,
}
actorsCidTuples = append(actorsCidTuples, keyVal)
}
sort.Slice(actorsCidTuples, func(i, j int) bool {
return actorsCidTuples[i].actorName < actorsCidTuples[j].actorName
})
for _, keyVal := range actorsCidTuples {
_, _ = fmt.Fprintf(tw, "%v\t%v\n", keyVal.actorName, keyVal.actorCid)
} }
return tw.Flush() return tw.Flush()
}, },
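The hunk collects (name, CID) pairs into a slice and sorts by actor name so the table prints deterministically regardless of map iteration order. An equivalent, slightly shorter pattern sorts the keys and indexes back into the map; a sketch with placeholder string values standing in for the real cid.Cid entries:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// stand-in for the actor name -> code CID map returned by the API
	actorsCids := map[string]string{
		"storageminer": "cid-of-miner-actor",
		"account":      "cid-of-account-actor",
		"cron":         "cid-of-cron-actor",
	}
	names := make([]string, 0, len(actorsCids))
	for name := range actorsCids {
		names = append(names, name)
	}
	sort.Strings(names) // deterministic output order
	for _, name := range names {
		fmt.Printf("%v\t%v\n", name, actorsCids[name])
	}
}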

View File

@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
cliutil "github.com/filecoin-project/lotus/cli/util"
) )
var SyncCmd = &cli.Command{ var SyncCmd = &cli.Command{
@ -262,6 +263,9 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error {
} }
firstApp = state.VMApplied firstApp = state.VMApplied
// eta computes the ETA for the sync to complete (with a lookback of 10 processed items)
eta := cliutil.NewETA(10)
for { for {
state, err := napi.SyncState(ctx) state, err := napi.SyncState(ctx)
if err != nil { if err != nil {
@ -312,8 +316,10 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error {
fmt.Print("\r\x1b[2K\x1b[A") fmt.Print("\r\x1b[2K\x1b[A")
} }
todo := theight - ss.Height
fmt.Printf("Worker: %d; Base: %d; Target: %d (diff: %d)\n", workerID, baseHeight, theight, heightDiff) fmt.Printf("Worker: %d; Base: %d; Target: %d (diff: %d)\n", workerID, baseHeight, theight, heightDiff)
fmt.Printf("State: %s; Current Epoch: %d; Todo: %d\n", ss.Stage, ss.Height, theight-ss.Height) fmt.Printf("State: %s; Current Epoch: %d; Todo: %d, ETA: %s\n", ss.Stage, ss.Height, todo, eta.Update(int64(todo)))
lastLines = 2 lastLines = 2
if i%samples == 0 { if i%samples == 0 {

94
cli/util/eta.go Normal file
View File

@ -0,0 +1,94 @@
package cliutil
import (
"fmt"
"math"
"time"
)
// ETA implements a very simple ETA calculator based on the number of remaining items. It does not
// require knowing the total work size in advance, so it suits streaming workloads, and it tolerates
// updates whose remaining value is not monotonically decreasing.
type ETA struct {
// max number of items to keep in memory
maxItems int
// a queue of most recently updated items
items []item
// we store the last calculated ETA, which we reuse if there was no change in the remaining items
lastETA string
}
type item struct {
timestamp time.Time
remaining int64
}
// NewETA creates a new ETA calculator that keeps a sliding window of up to maxItems updates
func NewETA(maxItems int) *ETA {
return &ETA{
maxItems: maxItems,
items: make([]item, 0),
}
}
// Update updates the ETA calculator with the remaining number of items and returns the ETA
func (e *ETA) Update(remaining int64) string {
item := item{
timestamp: time.Now(),
remaining: remaining,
}
if len(e.items) == 0 {
e.items = append(e.items, item)
return ""
}
if e.items[len(e.items)-1].remaining == remaining {
// we ignore updates with the same remaining value and just return the previous ETA
return e.lastETA
} else if e.items[len(e.items)-1].remaining < remaining {
// remaining went up from the previous update; let's estimate how many items were processed using the
// average number of processed items in the queue.
var avgProcessedPerItem int64 = 1
if len(e.items) > 1 {
diffRemaining := e.items[0].remaining - e.items[len(e.items)-1].remaining
avgProcessedPerItem = int64(math.Round(float64(diffRemaining) / float64(len(e.items))))
}
// diff is the increase in remaining since the last update plus the average number of items
// we estimate were processed this round
diff := remaining - e.items[len(e.items)-1].remaining + avgProcessedPerItem
// we update all items in the queue by shifting their remaining value accordingly. This means that we
// always have strictly decreasing remaining values in the queue
for i := range e.items {
e.items[i].remaining += diff
}
}
// append the item to the queue and remove the oldest item if needed
if len(e.items) >= e.maxItems {
e.items = e.items[1:]
}
e.items = append(e.items, item)
// calculate the average processing time per item in the queue
diffMs := e.items[len(e.items)-1].timestamp.Sub(e.items[0].timestamp).Milliseconds()
nrItemsProcessed := e.items[0].remaining - e.items[len(e.items)-1].remaining
avg := diffMs / nrItemsProcessed
// use that average processing time to estimate how long the remaining items will take
// and cache that ETA so we don't have to recalculate it on every update unless the
// remaining value changes
e.lastETA = msToETA(avg * remaining)
return e.lastETA
}
func msToETA(ms int64) string {
seconds := ms / 1000
minutes := seconds / 60
hours := minutes / 60
return fmt.Sprintf("%02dh:%02dm:%02ds", hours, minutes%60, seconds%60)
}
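A brief usage sketch of the new calculator, mirroring how SyncWait drives it: the first Update returns an empty string (no history yet), and subsequent calls return an HHh:MMm:SSs estimate derived from the sliding window. The timings and counts here are arbitrary.

package main

import (
	"fmt"
	"time"

	cliutil "github.com/filecoin-project/lotus/cli/util"
)

func main() {
	eta := cliutil.NewETA(10)
	remaining := int64(100)
	for i := 0; i < 5; i++ {
		// first call returns "", later calls return e.g. "00h:00m:01s"
		fmt.Printf("remaining=%d eta=%q\n", remaining, eta.Update(remaining))
		time.Sleep(50 * time.Millisecond)
		remaining -= 10
	}
}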

Some files were not shown because too many files have changed in this diff.