diff --git a/.circleci/config.yml b/.circleci/config.yml index a0d5c4c38..eae5e07c8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -933,6 +933,8 @@ workflows: - build suite: itest-sector_pledge target: "./itests/sector_pledge_test.go" + get-params: true + - test: name: test-itest-sector_terminate requires: @@ -1016,6 +1018,7 @@ workflows: suite: utest-unit-cli target: "./cli/... ./cmd/... ./api/..." get-params: true + executor: golang-2xl - test: name: test-unit-node requires: @@ -1023,6 +1026,7 @@ workflows: suite: utest-unit-node target: "./node/..." + - test: name: test-unit-rest requires: @@ -1030,6 +1034,7 @@ workflows: suite: utest-unit-rest target: "./blockstore/... ./build/... ./chain/... ./conformance/... ./gateway/... ./journal/... ./lib/... ./markets/... ./paychmgr/... ./provider/... ./tools/..." + executor: golang-2xl - test: name: test-unit-storage requires: @@ -1037,6 +1042,7 @@ workflows: suite: utest-unit-storage target: "./storage/... ./extern/..." + - test: go-test-flags: "-run=TestMulticoreSDR" requires: diff --git a/.circleci/template.yml b/.circleci/template.yml index 0f7a4e031..7bc84c218 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -554,7 +554,7 @@ workflows: [[- if or (eq $name "worker") (eq $name "deals_concurrent") (eq $name "wdpost_worker_config")]] resource_class: 2xlarge [[- end]] - [[- if (eq $name "wdpost")]] + [[- if or (eq $name "wdpost") (eq $name "sector_pledge")]] get-params: true [[end]] [[- end ]][[- end]] @@ -567,6 +567,8 @@ workflows: suite: utest-[[ $suite ]] target: "[[ $pkgs ]]" [[if eq $suite "unit-cli"]]get-params: true[[end]] + [[if eq $suite "unit-cli"]]executor: golang-2xl[[end]] + [[- if eq $suite "unit-rest"]]executor: golang-2xl[[end]] [[- end]] - test: go-test-flags: "-run=TestMulticoreSDR" diff --git a/.gitignore b/.gitignore index fdcc773d1..c40a76fd0 100644 --- a/.gitignore +++ b/.gitignore @@ -42,6 +42,7 @@ build/paramfetch.sh bin/ipget bin/tmp/* .idea +.vscode scratchpad 
build/builtin-actors/v* diff --git a/CHANGELOG.md b/CHANGELOG.md index 9076e978f..3ea256e1f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,8 @@ # Lotus changelog # UNRELEASED +- chore: Auto remove local chain data when importing chain file or snapshot ([filecoin-project/lotus#11277](https://github.com/filecoin-project/lotus/pull/11277)) +- feat: metric: export Mpool message count ([filecoin-project/lotus#11361](https://github.com/filecoin-project/lotus/pull/11361)) ## New features - feat: Added new tracing API (**HIGHLY EXPERIMENTAL**) supporting two RPC methods: `trace_block` and `trace_replayBlockTransactions` ([filecoin-project/lotus#11100](https://github.com/filecoin-project/lotus/pull/11100)) diff --git a/LOTUS_RELEASE_FLOW.md b/LOTUS_RELEASE_FLOW.md index 4a327125a..8bb02d3c5 100644 --- a/LOTUS_RELEASE_FLOW.md +++ b/LOTUS_RELEASE_FLOW.md @@ -73,7 +73,7 @@ All releases under an odd minor version number indicate **feature releases**. Th Feature releases include new development and bug fixes. They are not mandatory, but still highly recommended, **as they may contain critical security fixes**. Note that some of these releases may be very small patch releases that include critical hotfixes. There is no way to distinguish between a bug fix release and a feature release on the "feature" version. Both cases will use the "patch" version number. -We aim to ship a new feature release of the Lotus software from our development (master) branch every 3 weeks, so users can expect a regular cadence of Lotus feature releases. Note that mandatory releases for network upgrades may disrupt this schedule. For more, see the Release Cycle section (TODO: Link). +We aim to ship a new feature release of the Lotus software from our development (master) branch every 3 weeks, so users can expect a regular cadence of Lotus feature releases. Note that mandatory releases for network upgrades may disrupt this schedule. For more, see the [Release Cycle section](#release-cycle). 
### Examples Scenarios diff --git a/README.md b/README.md index f6ac75932..c944d41e6 100644 --- a/README.md +++ b/README.md @@ -71,10 +71,10 @@ For other distributions you can find the required dependencies [here.](https://l #### Go -To build Lotus, you need a working installation of [Go 1.19.12 or higher](https://golang.org/dl/): +To build Lotus, you need a working installation of [Go 1.20.10 or higher](https://golang.org/dl/): ```bash -wget -c https://golang.org/dl/go1.19.12.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local +wget -c https://golang.org/dl/go1.20.10.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local ``` **TIP:** @@ -133,6 +133,8 @@ Note: The default branch `master` is the dev branch where the latest new feature 6. You should now have Lotus installed. You can now [start the Lotus daemon and sync the chain](https://lotus.filecoin.io/lotus/install/linux/#start-the-lotus-daemon-and-sync-the-chain). +7. (Optional) Follow the [Setting Up Prometheus and Grafana](https://github.com/filecoin-project/lotus/tree/master/metrics/README.md) guide for detailed instructions on setting up a working monitoring system running against a local running lotus node. 
+ ## License Dual-licensed under [MIT](https://github.com/filecoin-project/lotus/blob/master/LICENSE-MIT) + [Apache 2.0](https://github.com/filecoin-project/lotus/blob/master/LICENSE-APACHE) diff --git a/api/api_full.go b/api/api_full.go index 55dcc23df..f919bc13b 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -21,7 +21,6 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin/v8/paych" "github.com/filecoin-project/go-state-types/builtin/v9/market" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" @@ -29,7 +28,7 @@ import ( apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/chain/actors/builtin" - lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" @@ -499,9 +498,9 @@ type FullNode interface { // expiration epoch StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) //perm:read // StateSectorExpiration returns epoch at which given sector will expire - StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*lminer.SectorExpiration, error) //perm:read + StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) //perm:read // StateSectorPartition finds deadline/partition with the specified sector - StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*lminer.SectorLocation, error) //perm:read + 
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) //perm:read // StateSearchMsg looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed // // NOTE: If a replacing message is found on chain, this method will return diff --git a/api/api_gateway.go b/api/api_gateway.go index 08199564d..27e725457 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -9,11 +9,11 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/dline" apitypes "github.com/filecoin-project/lotus/api/types" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/node/modules/dtypes" diff --git a/api/api_storage.go b/api/api_storage.go index a9e632998..d5b3d5c1d 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -19,10 +19,10 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin/v9/market" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" abinetwork "github.com/filecoin-project/go-state-types/network" builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer/fsutil" @@ -461,10 +461,15 @@ type SectorOffset struct { // DealInfo is a tuple of deal identity and 
its schedule type PieceDealInfo struct { + // "Old" builtin-market deal info PublishCid *cid.Cid DealID abi.DealID DealProposal *market.DealProposal + + // Common deal info DealSchedule DealSchedule + + // Best-effort deal asks KeepUnsealed bool } diff --git a/api/cbor_gen.go b/api/cbor_gen.go index 80392b212..fd2cb30b4 100644 --- a/api/cbor_gen.go +++ b/api/cbor_gen.go @@ -42,7 +42,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Channel"))); err != nil { return err } - if _, err := io.WriteString(w, string("Channel")); err != nil { + if _, err := cw.WriteString(string("Channel")); err != nil { return err } @@ -58,7 +58,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Vouchers"))); err != nil { return err } - if _, err := io.WriteString(w, string("Vouchers")); err != nil { + if _, err := cw.WriteString(string("Vouchers")); err != nil { return err } @@ -83,7 +83,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WaitSentinel"))); err != nil { return err } - if _, err := io.WriteString(w, string("WaitSentinel")); err != nil { + if _, err := cw.WriteString(string("WaitSentinel")); err != nil { return err } @@ -163,13 +163,32 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v paych.SignedVoucher - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Vouchers[i] = new(paych.SignedVoucher) + if err := t.Vouchers[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Vouchers[i] pointer: %w", err) + } + } + + } } - - 
t.Vouchers[i] = &v } // t.WaitSentinel (cid.Cid) (struct) @@ -214,7 +233,7 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Size"))); err != nil { return err } - if _, err := io.WriteString(w, string("Size")); err != nil { + if _, err := cw.WriteString(string("Size")); err != nil { return err } @@ -230,7 +249,7 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Offset"))); err != nil { return err } - if _, err := io.WriteString(w, string("Offset")); err != nil { + if _, err := cw.WriteString(string("Offset")); err != nil { return err } @@ -246,7 +265,7 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorID"))); err != nil { return err } - if _, err := io.WriteString(w, string("SectorID")); err != nil { + if _, err := cw.WriteString(string("SectorID")); err != nil { return err } @@ -369,7 +388,7 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Refs"))); err != nil { return err } - if _, err := io.WriteString(w, string("Refs")); err != nil { + if _, err := cw.WriteString(string("Refs")); err != nil { return err } @@ -447,13 +466,22 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v SealedRef - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + if err := t.Refs[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Refs[i]: %w", err) + } + + } } - - t.Refs[i] = v } default: @@ -484,7 +512,7 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil { return err } - if _, err := io.WriteString(w, string("Epoch")); err != nil { 
+ if _, err := cw.WriteString(string("Epoch")); err != nil { return err } @@ -506,7 +534,7 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Value"))); err != nil { return err } - if _, err := io.WriteString(w, string("Value")); err != nil { + if _, err := cw.WriteString(string("Value")); err != nil { return err } @@ -639,7 +667,7 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil { return err } - if _, err := io.WriteString(w, string("Epoch")); err != nil { + if _, err := cw.WriteString(string("Epoch")); err != nil { return err } @@ -661,7 +689,7 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Value"))); err != nil { return err } - if _, err := io.WriteString(w, string("Value")); err != nil { + if _, err := cw.WriteString(string("Value")); err != nil { return err } @@ -794,7 +822,7 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { return err } - if _, err := io.WriteString(w, string("DealID")); err != nil { + if _, err := cw.WriteString(string("DealID")); err != nil { return err } @@ -810,7 +838,7 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil { return err } - if _, err := io.WriteString(w, string("PublishCid")); err != nil { + if _, err := cw.WriteString(string("PublishCid")); err != nil { return err } @@ -832,7 +860,7 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { return err } - if _, err := io.WriteString(w, string("DealProposal")); err != nil { + if _, err := cw.WriteString(string("DealProposal")); err != nil { return err 
} @@ -848,7 +876,7 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealSchedule"))); err != nil { return err } - if _, err := io.WriteString(w, string("DealSchedule")); err != nil { + if _, err := cw.WriteString(string("DealSchedule")); err != nil { return err } @@ -864,7 +892,7 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil { return err } - if _, err := io.WriteString(w, string("KeepUnsealed")); err != nil { + if _, err := cw.WriteString(string("KeepUnsealed")); err != nil { return err } @@ -1027,7 +1055,7 @@ func (t *SectorPiece) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Piece"))); err != nil { return err } - if _, err := io.WriteString(w, string("Piece")); err != nil { + if _, err := cw.WriteString(string("Piece")); err != nil { return err } @@ -1043,7 +1071,7 @@ func (t *SectorPiece) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealInfo"))); err != nil { return err } - if _, err := io.WriteString(w, string("DealInfo")); err != nil { + if _, err := cw.WriteString(string("DealInfo")); err != nil { return err } @@ -1150,7 +1178,7 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("EndEpoch"))); err != nil { return err } - if _, err := io.WriteString(w, string("EndEpoch")); err != nil { + if _, err := cw.WriteString(string("EndEpoch")); err != nil { return err } @@ -1172,7 +1200,7 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil { return err } - if _, err := io.WriteString(w, string("StartEpoch")); err != nil { + if _, err := cw.WriteString(string("StartEpoch")); err != nil { return err } diff --git 
a/api/proxy_gen.go b/api/proxy_gen.go index 1082e8f4c..2d1333495 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -26,7 +26,6 @@ import ( "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/builtin/v8/paych" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" @@ -35,7 +34,7 @@ import ( apitypes "github.com/filecoin-project/lotus/api/types" builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" - lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/journal/alerting" @@ -564,11 +563,11 @@ type FullNodeMethods struct { StateSearchMsg func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `perm:"read"` - StateSectorExpiration func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorExpiration, error) `perm:"read"` + StateSectorExpiration func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) `perm:"read"` StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"` - StateSectorPartition func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorLocation, error) `perm:"read"` + StateSectorPartition func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) `perm:"read"` StateSectorPreCommitInfo func(p0 
context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error) `perm:"read"` @@ -3829,14 +3828,14 @@ func (s *FullNodeStub) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 return nil, ErrNotSupported } -func (s *FullNodeStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorExpiration, error) { +func (s *FullNodeStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) { if s.Internal.StateSectorExpiration == nil { return nil, ErrNotSupported } return s.Internal.StateSectorExpiration(p0, p1, p2, p3) } -func (s *FullNodeStub) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorExpiration, error) { +func (s *FullNodeStub) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) { return nil, ErrNotSupported } @@ -3851,14 +3850,14 @@ func (s *FullNodeStub) StateSectorGetInfo(p0 context.Context, p1 address.Address return nil, ErrNotSupported } -func (s *FullNodeStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorLocation, error) { +func (s *FullNodeStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) { if s.Internal.StateSectorPartition == nil { return nil, ErrNotSupported } return s.Internal.StateSectorPartition(p0, p1, p2, p3) } -func (s *FullNodeStub) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorLocation, error) { +func (s *FullNodeStub) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) { 
return nil, ErrNotSupported } diff --git a/api/types.go b/api/types.go index 96f9fa63d..93ed4083f 100644 --- a/api/types.go +++ b/api/types.go @@ -19,8 +19,8 @@ import ( datatransfer "github.com/filecoin-project/go-data-transfer/v2" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/modules/dtypes" ) @@ -56,9 +56,17 @@ type PubsubScore struct { Score *pubsub.PeerScoreSnapshot } +// MessageSendSpec contains optional fields which modify message sending behavior type MessageSendSpec struct { - MaxFee abi.TokenAmount + // MaxFee specifies a cap on network fees related to this message + MaxFee abi.TokenAmount + + // MsgUuid specifies a unique message identifier which can be used on node (or node cluster) + // level to prevent double-sends of messages even when nonce generation is not handled by sender MsgUuid uuid.UUID + + // MaximizeFeeCap makes message FeeCap be based entirely on MaxFee + MaximizeFeeCap bool } type MpoolMessageWhole struct { diff --git a/api/v0api/full.go b/api/v0api/full.go index 322f72449..d92d5a95c 100644 --- a/api/v0api/full.go +++ b/api/v0api/full.go @@ -15,7 +15,6 @@ import ( "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/builtin/v8/paych" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" @@ -23,7 +22,7 @@ import ( "github.com/filecoin-project/lotus/api" apitypes "github.com/filecoin-project/lotus/api/types" - lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" 
+ "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -450,9 +449,9 @@ type FullNode interface { // expiration epoch StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) //perm:read // StateSectorExpiration returns epoch at which given sector will expire - StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*lminer.SectorExpiration, error) //perm:read + StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) //perm:read // StateSectorPartition finds deadline/partition with the specified sector - StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*lminer.SectorLocation, error) //perm:read + StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) //perm:read // StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed // // NOTE: If a replacing message is found on chain, this method will return diff --git a/api/v0api/gateway.go b/api/v0api/gateway.go index df9153b8d..1a7f7d3ac 100644 --- a/api/v0api/gateway.go +++ b/api/v0api/gateway.go @@ -8,12 +8,12 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/dline" abinetwork "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" + 
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/modules/dtypes" ) diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go index 22faa3acc..bd37f6429 100644 --- a/api/v0api/proxy_gen.go +++ b/api/v0api/proxy_gen.go @@ -17,7 +17,6 @@ import ( "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/builtin/v8/paych" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" @@ -25,7 +24,7 @@ import ( "github.com/filecoin-project/lotus/api" apitypes "github.com/filecoin-project/lotus/api/types" - lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -355,11 +354,11 @@ type FullNodeMethods struct { StateSearchMsgLimited func(p0 context.Context, p1 cid.Cid, p2 abi.ChainEpoch) (*api.MsgLookup, error) `perm:"read"` - StateSectorExpiration func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorExpiration, error) `perm:"read"` + StateSectorExpiration func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) `perm:"read"` StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"` - StateSectorPartition func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorLocation, error) `perm:"read"` + 
StateSectorPartition func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) `perm:"read"` StateSectorPreCommitInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"` @@ -2245,14 +2244,14 @@ func (s *FullNodeStub) StateSearchMsgLimited(p0 context.Context, p1 cid.Cid, p2 return nil, ErrNotSupported } -func (s *FullNodeStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorExpiration, error) { +func (s *FullNodeStruct) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) { if s.Internal.StateSectorExpiration == nil { return nil, ErrNotSupported } return s.Internal.StateSectorExpiration(p0, p1, p2, p3) } -func (s *FullNodeStub) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorExpiration, error) { +func (s *FullNodeStub) StateSectorExpiration(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorExpiration, error) { return nil, ErrNotSupported } @@ -2267,14 +2266,14 @@ func (s *FullNodeStub) StateSectorGetInfo(p0 context.Context, p1 address.Address return nil, ErrNotSupported } -func (s *FullNodeStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*lminer.SectorLocation, error) { +func (s *FullNodeStruct) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) { if s.Internal.StateSectorPartition == nil { return nil, ErrNotSupported } return s.Internal.StateSectorPartition(p0, p1, p2, p3) } -func (s *FullNodeStub) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) 
(*lminer.SectorLocation, error) { +func (s *FullNodeStub) StateSectorPartition(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorLocation, error) { return nil, ErrNotSupported } diff --git a/api/v0api/v1_wrapper.go b/api/v0api/v1_wrapper.go index f58b0420f..265674e71 100644 --- a/api/v0api/v1_wrapper.go +++ b/api/v0api/v1_wrapper.go @@ -12,11 +12,11 @@ import ( "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" marketevents "github.com/filecoin-project/lotus/markets/loggers" ) diff --git a/blockstore/cbor_gen.go b/blockstore/cbor_gen.go index b8ebdb474..221f13676 100644 --- a/blockstore/cbor_gen.go +++ b/blockstore/cbor_gen.go @@ -52,9 +52,11 @@ func (t *NetRpcReq) MarshalCBOR(w io.Writer) error { return err } for _, v := range t.Cid { - if err := cbg.WriteCid(w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.Cid: %w", err) + + if err := cbg.WriteCid(cw, v); err != nil { + return xerrors.Errorf("failed to write cid field v: %w", err) } + } // t.Data ([][]uint8) (slice) @@ -151,12 +153,25 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("reading cid field t.Cid failed: %w", err) + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Cid[i]: %w", err) + } + + t.Cid[i] = c + + } } - t.Cid[i] = c } // t.Data ([][]uint8) (slice) @@ -183,6 
+198,9 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) { var maj byte var extra uint64 var err error + _ = maj + _ = extra + _ = err maj, extra, err = cr.ReadHeader() if err != nil { @@ -350,7 +368,7 @@ func (t *NetRpcErr) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Msg))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Msg)); err != nil { + if _, err := cw.WriteString(string(t.Msg)); err != nil { return err } diff --git a/blockstore/idstore.go b/blockstore/idstore.go index fb575dca7..a10aee575 100644 --- a/blockstore/idstore.go +++ b/blockstore/idstore.go @@ -183,3 +183,17 @@ func (b *idstore) Close() error { func (b *idstore) Flush(ctx context.Context) error { return b.bs.Flush(ctx) } + +func (b *idstore) CollectGarbage(ctx context.Context, options ...BlockstoreGCOption) error { + if bs, ok := b.bs.(BlockstoreGC); ok { + return bs.CollectGarbage(ctx, options...) + } + return xerrors.Errorf("not supported") +} + +func (b *idstore) GCOnce(ctx context.Context, options ...BlockstoreGCOption) error { + if bs, ok := b.bs.(BlockstoreGCOnce); ok { + return bs.GCOnce(ctx, options...) 
+ } + return xerrors.Errorf("not supported") +} diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index 534565bf3..47caca886 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -66,8 +66,9 @@ var ( ) const ( - batchSize = 16384 - cidKeySize = 128 + batchSize = 16384 + cidKeySize = 128 + purgeWorkSliceDuration = time.Second ) func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { @@ -1372,9 +1373,21 @@ func (s *SplitStore) purge(coldr *ColdSetReader, checkpoint *Checkpoint, markSet return err } + now := time.Now() + err := coldr.ForEach(func(c cid.Cid) error { batch = append(batch, c) if len(batch) == batchSize { + // add some time slicing to the purge as this a very disk I/O heavy operation that + // requires write access to txnLk that may starve other operations that require + // access to the blockstore. + elapsed := time.Since(now) + if elapsed > purgeWorkSliceDuration { + // work 1 slice, sleep 4 slices, or 20% utilization + time.Sleep(4 * elapsed) + now = time.Now() + } + return deleteBatch() } diff --git a/build/bootstrap/butterflynet.pi b/build/bootstrap/butterflynet.pi index c7e9b2e92..3f5033361 100644 --- a/build/bootstrap/butterflynet.pi +++ b/build/bootstrap/butterflynet.pi @@ -1,2 +1,2 @@ -/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWD5mtdmjHQ1Puj9Md7SEfoa7kWMpwqUhAKsyYsBP56LQC -/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWEoYPkm6o87ES6AppFY7d7WHJUQg7XVPRAyQZjEU31efQ +/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWRaoPgwJuZdPSN4A2iTeh8xzkZGCEBxan9vMkidHisUgn +/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWMjLCZeEf3VzSWvQYuhe9VzCcrN6RENX9FgmQqiJfQDWs diff --git a/build/bootstrap/mainnet.pi b/build/bootstrap/mainnet.pi index 3e09b3d14..9f7d887b4 100644 --- a/build/bootstrap/mainnet.pi +++ b/build/bootstrap/mainnet.pi @@ -1,9 +1,6 @@ 
/dns4/bootstrap-0.mainnet.filops.net/tcp/1347/p2p/12D3KooWCVe8MmsEMes2FzgTpt9fXtmCY7wrq91GRiaC8PHSCCBj /dns4/bootstrap-1.mainnet.filops.net/tcp/1347/p2p/12D3KooWCwevHg1yLCvktf2nvLu7L9894mcrJR4MsBCcm4syShVc /dns4/bootstrap-2.mainnet.filops.net/tcp/1347/p2p/12D3KooWEWVwHGn2yR36gKLozmb4YjDJGerotAPGxmdWZx2nxMC4 -/dns4/bootstrap-3.mainnet.filops.net/tcp/1347/p2p/12D3KooWKhgq8c7NQ9iGjbyK7v7phXvG6492HQfiDaGHLHLQjk7R -/dns4/bootstrap-4.mainnet.filops.net/tcp/1347/p2p/12D3KooWL6PsFNPhYftrJzGgF5U18hFoaVhfGk7xwzD8yVrHJ3Uc -/dns4/bootstrap-5.mainnet.filops.net/tcp/1347/p2p/12D3KooWLFynvDQiUpXoHroV1YxKHhPJgysQGH2k3ZGwtWzR4dFH /dns4/bootstrap-6.mainnet.filops.net/tcp/1347/p2p/12D3KooWP5MwCiqdMETF9ub1P3MbCvQCcfconnYHbWg6sUJcDRQQ /dns4/bootstrap-7.mainnet.filops.net/tcp/1347/p2p/12D3KooWRs3aY1p3juFjPy8gPN95PEQChm2QKGUCAdcDCC4EBMKf /dns4/bootstrap-8.mainnet.filops.net/tcp/1347/p2p/12D3KooWScFR7385LTyR4zU1bYdzSiiAb5rnNABfVahPvVSzyTkR @@ -13,4 +10,5 @@ /dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt /dns4/bootstrap-0.ipfsmain.cn/tcp/34721/p2p/12D3KooWQnwEGNqcM2nAcPtRR9rAX8Hrg4k9kJLCHoTR5chJfz6d /dns4/bootstrap-1.ipfsmain.cn/tcp/34723/p2p/12D3KooWMKxMkD5DMpSWsW7dBddKxKT7L2GgbNuckz9otxvkvByP -/dns4/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWRzCVDwHUkgdK7eRgnoXbjDAELhxPErjHzbRLguSV1aRt \ No newline at end of file +/dns4/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWRzCVDwHUkgdK7eRgnoXbjDAELhxPErjHzbRLguSV1aRt +/dns4/bootstrap-venus.mainnet.filincubator.com/tcp/8888/p2p/QmQu8C6deXwKvJP2D8B6QGyhngc3ZiDnFzEHBDx8yeBXST diff --git a/build/builtin_actors.go b/build/builtin_actors.go index 50aecde40..2cf40a754 100644 --- a/build/builtin_actors.go +++ b/build/builtin_actors.go @@ -42,6 +42,12 @@ func init() { if err := loadManifests(NetworkBundle); err != nil { panic(err) } + + // The following code cid existed temporarily on the calibnet testnet, as a "buggy" storage miner actor implementation. 
+ // We include it in our builtin bundle, but intentionally omit from metadata. + if NetworkBundle == "calibrationnet" { + actors.AddActorMeta("storageminer", cid.MustParse("bafk2bzacecnh2ouohmonvebq7uughh4h3ppmg4cjsk74dzxlbbtlcij4xbzxq"), actorstypes.Version12) + } } // UseNetworkBundle switches to a different network bundle, by name. @@ -183,6 +189,12 @@ func readEmbeddedBuiltinActorsMetadata(bundle string) ([]*BuiltinActorsMetadata, if err != nil { return nil, xerrors.Errorf("error loading builtin actors bundle: %w", err) } + + // The following manifest cid existed temporarily on the calibnet testnet + // We include it in our builtin bundle, but intentionally omit from metadata + if root == cid.MustParse("bafy2bzacedrunxfqta5skb7q7x32lnp4efz2oq7fn226ffm7fu5iqs62jkmvs") { + continue + } bundles = append(bundles, &BuiltinActorsMetadata{ Network: name, Version: actorstypes.Version(version), @@ -232,7 +244,7 @@ func readBundleManifest(r io.Reader) (cid.Cid, map[string]cid.Cid, error) { } // GetEmbeddedBuiltinActorsBundle returns the builtin-actors bundle for the given actors version. 
-func GetEmbeddedBuiltinActorsBundle(version actorstypes.Version) ([]byte, bool) { +func GetEmbeddedBuiltinActorsBundle(version actorstypes.Version, networkBundleName string) ([]byte, bool) { fi, err := embeddedBuiltinActorReleases.Open(fmt.Sprintf("actors/v%d.tar.zst", version)) if err != nil { return nil, false @@ -243,7 +255,7 @@ func GetEmbeddedBuiltinActorsBundle(version actorstypes.Version) ([]byte, bool) defer uncompressed.Close() //nolint tarReader := tar.NewReader(uncompressed) - targetFileName := fmt.Sprintf("builtin-actors-%s.car", NetworkBundle) + targetFileName := fmt.Sprintf("builtin-actors-%s.car", networkBundleName) for { header, err := tarReader.Next() switch err { diff --git a/build/builtin_actors_gen.go b/build/builtin_actors_gen.go index 3eed5bcf7..8d67bba2d 100644 --- a/build/builtin_actors_gen.go +++ b/build/builtin_actors_gen.go @@ -97,25 +97,25 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet }, { Network: "butterflynet", Version: 12, - BundleGitTag: "v11.0.0", - ManifestCid: MustParseCid("bafy2bzaceaiy4dsxxus5xp5n5i4tjzkb7sc54mjz7qnk2efhgmsrobjesxnza"), + BundleGitTag: "v12.0.0-rc.2", + ManifestCid: MustParseCid("bafy2bzaceabeegs5mhrxwqodyt7u2ulxr2jg6eh5g3545ganqzkncnxzserue"), Actors: map[string]cid.Cid{ - "account": MustParseCid("bafk2bzacecfdqb7p3jakhaa3cqnzpt7hxmhghrbxvafsylqno3febx55fnidw"), - "cron": MustParseCid("bafk2bzaceavmqu2qihgbe3xdaotgypuzvdpiifnm7ll6rolks2u4lac6voosk"), - "datacap": MustParseCid("bafk2bzacealtvh65rzb34fmyzw4m2np2htnio4w3pn4alzqovwxkdbf23dvpo"), - "eam": MustParseCid("bafk2bzacedko6hcjmwpuwgma5pb4gr2wgyvregk3nqqjxit7dv4es6vh5cjoc"), - "ethaccount": MustParseCid("bafk2bzacedhcei2xnr34poxr4xziypm2obqlibke4cs2cjfnr3sz6nf6h7fyy"), - "evm": MustParseCid("bafk2bzacebn5lwxboiikhz67ajwa34v2lc4qevnhpwdnipbmrnutkvrrqkb46"), - "init": MustParseCid("bafk2bzacea6vw4esh5tg7mprv5jkbx5xcyilcy4vvf64lss32mjyuvv2mh5ng"), - "multisig": 
MustParseCid("bafk2bzacedq2afnwcfipay5twv5mgzjoio5bbjvyo4yqchdwqcr7wrareyx54"), - "paymentchannel": MustParseCid("bafk2bzacebbsvr7i7mqmaadyjibe5wxnv7bwvvec2wlgknuwda6ep45amnd5w"), + "account": MustParseCid("bafk2bzacebp7anjdtg2sohyt6lromx4xs7nujtwdfcsffnptphaayabx7ysxs"), + "cron": MustParseCid("bafk2bzacecu2y3awtemmglpkroiglulc2fj3gpdn6eazdqr6avcautiaighrg"), + "datacap": MustParseCid("bafk2bzacebbh5aynu3v3fluqqrcdsphleodoig42xkid2ccwdnff3avhbdop4"), + "eam": MustParseCid("bafk2bzacebzwt4v4hqoltiblhliwrnttxpr2dggbu3wsrvq4pwzisp7idu5w4"), + "ethaccount": MustParseCid("bafk2bzaceb5f6vgjkl7ic6ry5sjspqm2iij6qlcdovwi3haodb7wn37pgebii"), + "evm": MustParseCid("bafk2bzacebygt6zh6p52rkg2ugehm4k5yuu6f56i2pu6ywrmjez4n4zsje4p4"), + "init": MustParseCid("bafk2bzaceagyf3pwsthod7klfi25ow2zf2i5isfrrgr5ua3lvkgfojalrdbhw"), + "multisig": MustParseCid("bafk2bzacedgfo5mw2zqjwi37lah27sfxj4cw2abylgtxf3ucep4dyhgnppmqe"), + "paymentchannel": MustParseCid("bafk2bzacebm37tgu52cgzmiln6iip6etfmq73fd3qqz2j5gxlhtvachs7kw4c"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), - "reward": MustParseCid("bafk2bzaceafuh6idvaqqkj353vs4qdl42tcmvnymewu5zf4rq2nruxdyunses"), - "storagemarket": MustParseCid("bafk2bzaceb7bx4honi3byjllpdk6fea32dpu3vqvil3okodybdk5m3erlnwjw"), - "storageminer": MustParseCid("bafk2bzacebxjhofdr3sb2uhy2ky2vcijh4nhmwkh5xijtbgk6dzkknji2kn7a"), - "storagepower": MustParseCid("bafk2bzaceabskmmkas6njbowols7t4ib3bipa5abpomk3jtgfwojtzd7mjzfm"), - "system": MustParseCid("bafk2bzacedtuh7cht3fud7fb4avl4g2zbz57lc4ohiaufpaex6dkmdokn5rgo"), - "verifiedregistry": MustParseCid("bafk2bzaceb37hxeuoo5rgf6ansrdl2ykm5v5zp6kireubn4orcopr67jbxv6k"), + "reward": MustParseCid("bafk2bzacedebvitdsztwebi44t5es4ls3p3hor252igzawr3s6uznmbvzh2ou"), + "storagemarket": MustParseCid("bafk2bzaceb2tdeqtt2eqpzeb3gezuchb7g7uzbd52bgvcdt6bg3ckq7oisb74"), + "storageminer": MustParseCid("bafk2bzacea2jzzulmz6ktj6vndjheybz2io3vznnhsb32zberpaqeo7s7xa56"), + "storagepower": 
MustParseCid("bafk2bzacedxvlj5xmhytdjrjqyonz37duvxb2ioyzk75c27yypkqalxuh3xh6"), + "system": MustParseCid("bafk2bzacec3vwj2chzaram3iqupkbfiein5h2l5qiltlrngbju2vg5umelclm"), + "verifiedregistry": MustParseCid("bafk2bzacedv2irkql7nil3w5v3ohqq3e54w62pxeoppjmaktzokolaaoh5ksu"), }, }, { Network: "calibrationnet", @@ -203,25 +203,25 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet }, { Network: "calibrationnet", Version: 12, - BundleGitTag: "v11.0.0", - ManifestCid: MustParseCid("bafy2bzacec5fl7crmxyw234qsmijvffhssgqwuphyaejulbryhel2pxxrxgey"), + BundleGitTag: "v12.0.0-rc.2", + ManifestCid: MustParseCid("bafy2bzacebl4w5ptfvuw6746w7ev562idkbf5ppq72e6zub22435ws2rukzru"), Actors: map[string]cid.Cid{ - "account": MustParseCid("bafk2bzacecrjovyiuh4jryepy4pxddzqjyrg2hfinxzbv37bpzlci54r5mkr6"), - "cron": MustParseCid("bafk2bzacedy76woxmtalmsuaji4kog6wmq4h2kcgcyv5wpxbdz7f2ls2tjjmw"), - "datacap": MustParseCid("bafk2bzacec2inqddxpfm3rufwqr752d2ok3ve4cxfhmloyosy6rj2krtkpwus"), - "eam": MustParseCid("bafk2bzacea6sxno66egkqz5rqjq4e22obkeblxl7e3funjifljuinmrc2ztzg"), - "ethaccount": MustParseCid("bafk2bzacecdsvs7xm3ncm66lsjqh65uzhr3rmu3dlux7qzdgpg737r4kslhxm"), - "evm": MustParseCid("bafk2bzaceaz3b66m2znt27clmbp2zi5jsobw6g2x6fiezynyijgtkehgqhq3a"), - "init": MustParseCid("bafk2bzacecdrw7uedx456hnowtyyhm63mkekdlkh3vmlhvqlya6pn6pokiq5y"), - "multisig": MustParseCid("bafk2bzaceaxyxvmng5cel5huzy5nezscm34s7wuzn2fhndexurs3xjtp7xg5i"), - "paymentchannel": MustParseCid("bafk2bzacedrmyc4c6u6ipdo7hwaagx3urr47r4pw6lwv257wqbj6roumwfvky"), + "account": MustParseCid("bafk2bzacechwwxdqvggkdylm37zldjsra2ivkdzwp7fee56bzxbzs544wv6u6"), + "cron": MustParseCid("bafk2bzacec4gdxxkqwxqqodsv6ug5dmdbqdfqwyqfek3yhxc2wweh5psxaeq6"), + "datacap": MustParseCid("bafk2bzacecq5ppfskxgv3iea3jarsix6jdduuhwsn4fbvngtbmzelzmlygorm"), + "eam": MustParseCid("bafk2bzacecb6cnwftvavpph4p34zs4psuy5xvbrhf7vszkva4npw6mw3c42xe"), + "ethaccount": 
MustParseCid("bafk2bzaceajmc3y3sedsqymfla3dzzqzmbu5kmr2iskm26ga2u34ll5fpztfw"), + "evm": MustParseCid("bafk2bzaced4sozr7m6rzcgpobzeiupghthfw6afumysu3oz6bxxirv74uo3vw"), + "init": MustParseCid("bafk2bzaceaewh7b6zl2egclm7fqzx2lsqr57i75lb6cj43ndoa4mal3k5ld3m"), + "multisig": MustParseCid("bafk2bzacednkwcpw5yzxjceoaliajgupzj6iqxe7ks2ll3unspbprbo5f2now"), + "paymentchannel": MustParseCid("bafk2bzacebaxhk4itfiuvbftg7kz5zxugqnvdgerobitjq4vl6q4orcwk6wqg"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), - "reward": MustParseCid("bafk2bzacecq3bhrkatwash5zhy2275ksaj3criqb6rox5e3hsyvz7mrl2jh3o"), - "storagemarket": MustParseCid("bafk2bzacedswrpkbh7jmttskllbblym7oj2ynxp7bxtj2fpbxsx55mraog6sc"), - "storageminer": MustParseCid("bafk2bzacecki6ckm7gf4uje3fxvw6x5f77ukaqctvcsfha6oaecvl67veh3sg"), - "storagepower": MustParseCid("bafk2bzacecjcvxwibkgpufeah33gfd2jzlqjx5rn2pguvvch2squon23u6kne"), - "system": MustParseCid("bafk2bzaceavvlgqbcwhy3c24aa24z23wcbzggmb66gj7x5if7o3fbvddaocc4"), - "verifiedregistry": MustParseCid("bafk2bzacedmxdtnpy2mc63b6bi2h4vp4dfc6hxjckqnwaxyijgkpmangygcmk"), + "reward": MustParseCid("bafk2bzacedra77pcglf7vdca2itcaa4vd6xrxynxmgfgdjdxqxfwqyhtoxehy"), + "storagemarket": MustParseCid("bafk2bzacea7g46y7xxu2zjq2h75x6mmx3utz2uxnlvnwi6tzpsvulna3bmiva"), + "storageminer": MustParseCid("bafk2bzaced7emkbbnrewv5uvrokxpf5tlm4jslu2jsv77ofw2yqdglg657uie"), + "storagepower": MustParseCid("bafk2bzacedd3ka44k7d46ckbinjhv3diyuu2epgbyvhqqyjkc64qlrg3wlgzi"), + "system": MustParseCid("bafk2bzacecioupndtcnyw6iq2hbrxag3aufvczlv5nobnfbkbywqzcyfaa376"), + "verifiedregistry": MustParseCid("bafk2bzaceavldupmf7bimeeacs67z5xdfdlfca6p7sn6bev3mt5ggepfqvhqo"), }, }, { Network: "caterpillarnet", @@ -318,25 +318,25 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet }, { Network: "caterpillarnet", Version: 12, - BundleGitTag: "v11.0.0", - ManifestCid: 
MustParseCid("bafy2bzacebexc2jgzwr5ngn6jdnkwdqwwmcapajuypdgvopoe6bnvp4yxm4o2"), + BundleGitTag: "v12.0.0-rc.2", + ManifestCid: MustParseCid("bafy2bzacechvmc3iy6qrsbb6xwjhjibqpznqnkajowjsdmrq42ie5hysqemqa"), Actors: map[string]cid.Cid{ - "account": MustParseCid("bafk2bzaceanjiq5m3feytue5m7hhxfkob2ofg2greoct5tr77reuhrjglo66g"), - "cron": MustParseCid("bafk2bzaceavgd5qj6n744tukhdrvxejygzs3jnlizmcvjsdnxkgiimrd5jrys"), - "datacap": MustParseCid("bafk2bzacedmdywxwrzop2gmf4ys5stydlmvbe35j3nyr2efmf273briksuvse"), - "eam": MustParseCid("bafk2bzacec7qo7s72li7tqysllstlrxxm2dhfqv2w32pytel2e775cki4ozqm"), - "ethaccount": MustParseCid("bafk2bzaceaygtkliu26ubb7ivljrvaeesp5sbjlis5okzl35ishxioa2tlx4w"), - "evm": MustParseCid("bafk2bzacebo7iqzy2ophz4f3civzwlltec7q5fut7kmtfckr6vy33r6ic5eqe"), - "init": MustParseCid("bafk2bzaceb7uzzlsquqwrqhb2vpbvk3jgr4wp5i3smu2splnag2v5sppdehns"), - "multisig": MustParseCid("bafk2bzacebwibfqrytobl4pjtny244zkmfoomazbap3r5gddjryckx5js4csi"), - "paymentchannel": MustParseCid("bafk2bzacecuaa5esuxpouigxoamyl5gire2qqqhvyhewsig2x2j73f6ksh7go"), + "account": MustParseCid("bafk2bzacecereuhejfvodut5357cai4lmhsyr7uenhcxvmw6jpmhe6auuly32"), + "cron": MustParseCid("bafk2bzacebo2whgy6jla4jsf5j4ovlqm2e4eepedlpw5wadas33yxmunis4b4"), + "datacap": MustParseCid("bafk2bzacecjjncl7ftgj4mrzxxfxld74pt3pyfrxmcru7a5auab25b3aoixm6"), + "eam": MustParseCid("bafk2bzacebyvawfzoxy7k4yxrj5nd3amg4rcopmnslxdwpzumfhsz5ezk4sws"), + "ethaccount": MustParseCid("bafk2bzaceaccs76uc6osvb2iy6w2pumqei3wdjtxq7rgtsotobncmqoi7kzcg"), + "evm": MustParseCid("bafk2bzaceawxgjzjkhbqwj36wzxeqbtngdh6y2tp4wsi27k7tbg2ujhw5rsjg"), + "init": MustParseCid("bafk2bzacedws5od7o6ktqyo2hudmipxuubsv2lwxz45xxjn2zguze72t6zoik"), + "multisig": MustParseCid("bafk2bzacecb4wk6n4lrmml3tssn6cszd4dc7ttux3kzjatrawhg4o6ovrng6w"), + "paymentchannel": MustParseCid("bafk2bzacea3eb556mkjvosfbqfbyfg6dgu52rfnuctwzjy3b2bh2azredxzbo"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), - 
"reward": MustParseCid("bafk2bzaced4xxqhv63njf2ibvsqshlwikafctxev7aho5lgsfxyt2javjwvtw"), - "storagemarket": MustParseCid("bafk2bzacedwtx3xokqmbgkgkoqkdt6lam4ymdjb3eznlbtec5wcrtx74l2bpc"), - "storageminer": MustParseCid("bafk2bzacebbbe4sdo3xxkez7x7lkl6j46w34vx7eg7xswmdzhp7moa44p3wjg"), - "storagepower": MustParseCid("bafk2bzacedfgz6n24tjsor4pcayomim2f5f3a3fgyatmjgwxxeejna7okndda"), - "system": MustParseCid("bafk2bzacebxfzeom3d7ahcz2n2nlwp7ncv767bdbbrisugks4l6v7lcu2tmyg"), - "verifiedregistry": MustParseCid("bafk2bzacedaws3or3twy45ltcxucgvqijsje4x675ph6vup2w35smlfneamno"), + "reward": MustParseCid("bafk2bzaceb2x5zgkrho373l3ippp6krs7brugssg6hj2tib22xmqjzdm2o25a"), + "storagemarket": MustParseCid("bafk2bzaced5j6drzmsebpxbf2dtptrc5tyidlbftdljqxavxqb57s2qpbvdek"), + "storageminer": MustParseCid("bafk2bzacec7al5izu3ynnb7wg6fxxck3hebtkvlgynufjwcio57jd3n4ke2ue"), + "storagepower": MustParseCid("bafk2bzaceagp6ilkltsltwii66nz6a4zen4qtfk7rdkvdv3gzq7fbv4ivox3u"), + "system": MustParseCid("bafk2bzacedye5j5uxox7knb6zlnhseaadztyav76mjbyk5qslhhbpiy5cdtt2"), + "verifiedregistry": MustParseCid("bafk2bzacecduww5pirr7dvaijjijw4gf6ygf7vipgxh4scvv6vseo46gueb46"), }, }, { Network: "devnet", @@ -424,25 +424,25 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet }, { Network: "devnet", Version: 12, - BundleGitTag: "v11.0.0", - ManifestCid: MustParseCid("bafy2bzaceay35go4xbjb45km6o46e5bib3bi46panhovcbedrynzwmm3drr4i"), + BundleGitTag: "v12.0.0-rc.2", + ManifestCid: MustParseCid("bafy2bzaceau5i7eanhvss22z5ixmyrihilfniqn22tvkecjj56akz4xj7fvku"), Actors: map[string]cid.Cid{ - "account": MustParseCid("bafk2bzacecf2pprkbdlpm4e2xz3ufunxtgrgyh2ie3stuqiyhibsvdze7kvri"), - "cron": MustParseCid("bafk2bzaceasr5d2skowvzv5mzsyak6waqrgc46ewj6rzbapkfi5woom6n6bwa"), - "datacap": MustParseCid("bafk2bzaceaqd77gptubupda7rp7daxkxbkzwc253dxhiyoezxvj2tljmkgpny"), - "eam": MustParseCid("bafk2bzacedve6p4ye6zxydjbfs4ode5r2equ7rqzpyltujsq2lu6wyxnijfx4"), - "ethaccount": 
MustParseCid("bafk2bzacea25xfsxwew3h2crer6jlb4c5vwu2gtch2jh73ocuxjhupenyrugy"), - "evm": MustParseCid("bafk2bzacece5hivtkmi757lyfahgti7xuqgofodb2u65pxgf6oizfwiiwlcsi"), - "init": MustParseCid("bafk2bzacecxnr5y7qifzdqqiwfbjxv2yr7lbkcyu3e2mf5zjdncteupxdlquu"), - "multisig": MustParseCid("bafk2bzaceayap4k4u3lbysaeeixct5fvhmafy3fa5eagvdpk3i4a7ubfdpobe"), - "paymentchannel": MustParseCid("bafk2bzaceafgrz5wepbein35gie7rnsu7zttxvgllgdneuefmmy4j5izydtza"), + "account": MustParseCid("bafk2bzacedki4apynvdxxuoigmqkgaktgy2erjftoxqxqaklnelgveyaqknfu"), + "cron": MustParseCid("bafk2bzacebjpczf7qtcisy3zdp3sqoohxe75tgupmdo5dr26vh7orzrsjn3b2"), + "datacap": MustParseCid("bafk2bzacecz4esatk7gizdc7yvl6soigkelhix7izbc75q6eqtb7gjzavpcqc"), + "eam": MustParseCid("bafk2bzacebhtpd5mxfyovi7fgsfj62nhtmh4t5guob4sgq73ymgsk7473ltig"), + "ethaccount": MustParseCid("bafk2bzacebvdbbw5ag4qnxd7cif5mtakrw4wzv63diwl7awta5plaidfay4vg"), + "evm": MustParseCid("bafk2bzacebb7vrhprnshn52bzfmypjdpcrcfecapk232a6gapk3kghu2mp67q"), + "init": MustParseCid("bafk2bzaceaw4iouukgqxmwukfpt3sakdvsu75ftjvw47swnwtdftz5oszbt4w"), + "multisig": MustParseCid("bafk2bzaceahyjwf6re4mnuwhopglo3qzh6aboluboncpijm7vuiz3u4bkazho"), + "paymentchannel": MustParseCid("bafk2bzaceaupjw3djghaqw3g3hd4tw7uuas3njkszgzx2fhmgqh5eh4e6q2by"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), - "reward": MustParseCid("bafk2bzacedwbtfqlx47fdkxjrb5mwiatheci44x3zkpx33smybc2cme23ymuo"), - "storagemarket": MustParseCid("bafk2bzaceaj74fmooaf3gj3ebwon64ky7hhdh7kytdr3agclqfrqzmpzykh7g"), - "storageminer": MustParseCid("bafk2bzacedb7bokkzzs7hnbhivp74pgcpermuy7j6b3ncodylksukkxtnn7ze"), - "storagepower": MustParseCid("bafk2bzacedilnkegizkxz3nuutib4d4wwlk4bkla22loepia2h53yf4hysmq6"), - "system": MustParseCid("bafk2bzacedpyoncjbl4oxkjm5e77ngvpy2xfajjc4myfsv2vltvzxioattlu2"), - "verifiedregistry": MustParseCid("bafk2bzacebdqi5tr5pjnem5nylg2zbqcugvi7oxi35bhnrfudx4y4ufhlit2k"), + "reward": 
MustParseCid("bafk2bzacebzso6xkjxdscbpncw7el2d4hap6lfkgwqzrbc76lzp33vkwk6obc"), + "storagemarket": MustParseCid("bafk2bzacebzg74vyk3gzbhnz4zviwvxblyar574mtd6ayognmsvlkriejmunu"), + "storageminer": MustParseCid("bafk2bzaceckqrzomdnfb35byrhabrmmapxplj66cv3efw7u62qswjaqsuxah4"), + "storagepower": MustParseCid("bafk2bzacebbtj2m2ajawfuzxqz5nmdep7xevjo2qfjqa5tx3vr5m6qojolya4"), + "system": MustParseCid("bafk2bzacecnau5wddulbsvwn75tc3w75jrlvkybgrlxs4ngonqab6xq3eowvg"), + "verifiedregistry": MustParseCid("bafk2bzacec37mddea65nvh4htsagtryfa3sq6i67utcupslyhzbhjhoy6hopa"), }, }, { Network: "hyperspace", @@ -553,25 +553,25 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet }, { Network: "mainnet", Version: 12, - BundleGitTag: "v11.0.0", - ManifestCid: MustParseCid("bafy2bzacecnhaiwcrpyjvzl4uv4q3jzoif26okl3m66q3cijp3dfwlcxwztwo"), + BundleGitTag: "v12.0.0-rc.2", + ManifestCid: MustParseCid("bafy2bzacecooh5cjxkmraawn7wvmrx7k5hwdmbvqdteujr33mnaeclsazyb6a"), Actors: map[string]cid.Cid{ - "account": MustParseCid("bafk2bzacealnlr7st6lkwoh6wxpf2hnrlex5sknaopgmkr2tuhg7vmbfy45so"), - "cron": MustParseCid("bafk2bzacebpewdvvgt6tk2o2u4rcovdgym67tadiis5usemlbejg7k3kt567o"), - "datacap": MustParseCid("bafk2bzacebslykoyrb2hm7aacjngqgd5n2wmeii2goadrs5zaya3pvdf6pdnq"), - "eam": MustParseCid("bafk2bzaceaelwt4yfsfvsu3pa3miwalsvy3cfkcjvmt4sqoeopsppnrmj2mf2"), - "ethaccount": MustParseCid("bafk2bzaceclkmc4yidxc6lgcjpfypbde2eddnevcveo4j5kmh4ek6inqysz2k"), - "evm": MustParseCid("bafk2bzacediwh6etwzwmb5pivtclpdplewdjzphouwqpppce6opisjv2fjqfe"), - "init": MustParseCid("bafk2bzaceckwf3w6n2nw6eh77ktmsxqgsvshonvgnyk5q5syyngtetxvasfxg"), - "multisig": MustParseCid("bafk2bzaceafajceqwg5ybiz7xw6rxammuirkgtuv625gzaehsqfprm4bazjmk"), - "paymentchannel": MustParseCid("bafk2bzaceb4e6cnsnviegmqvsmoxzncruvhra54piq7bwiqfqevle6oob2gvo"), + "account": MustParseCid("bafk2bzaceboftg75mdiba7xbo2i3uvgtca4brhnr3u5ptihonixgpnrvhpxoa"), + "cron": 
MustParseCid("bafk2bzacechxjkfe2cehx4s7skj3wzfpzf7zolds64khrrrs66bhazsemktls"), + "datacap": MustParseCid("bafk2bzacebpiwb2ml4qbnnaayxumtk43ryhc63exdgnhivy3hwgmzemawsmpq"), + "eam": MustParseCid("bafk2bzaceb3elj4hfbbjp7g5bptc7su7mptszl4nlqfedilxvstjo5ungm6oe"), + "ethaccount": MustParseCid("bafk2bzaceb4gkau2vgsijcxpfuq33bd7w3efr2rrhxrwiacjmns2ntdiamswq"), + "evm": MustParseCid("bafk2bzacecmnyfiwb52tkbwmm2dsd7ysi3nvuxl3lmspy7pl26wxj4zj7w4wi"), + "init": MustParseCid("bafk2bzacebllyegx5r6lggf6ymyetbp7amacwpuxakhtjvjtvoy2bfkzk3vms"), + "multisig": MustParseCid("bafk2bzacecw5lyp3n3t67xdwrmo36h4z7afc3lobmmr6wg55w6yjzg5jhmh42"), + "paymentchannel": MustParseCid("bafk2bzacectv4cm47bnhga5febf3lo3fq47g72kmmp2xd5s6tcxz7hiqdywa4"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), - "reward": MustParseCid("bafk2bzacebwjw2vxkobs7r2kwjdqqb42h2kucyuk6flbnyzw4odg5s4mogamo"), - "storagemarket": MustParseCid("bafk2bzaceazu2j2zu4p24tr22btnqzkhzjvyjltlvsagaj6w3syevikeb5d7m"), - "storageminer": MustParseCid("bafk2bzacec24okjqrp7c7rj3hbrs5ez5apvwah2ruka6haesgfngf37mhk6us"), - "storagepower": MustParseCid("bafk2bzaceaxgloxuzg35vu7l7tohdgaq2frsfp4ejmuo7tkoxjp5zqrze6sf4"), - "system": MustParseCid("bafk2bzaced7npe5mt5nh72jxr2igi2sofoa7gedt4w6kueeke7i3xxugqpjfm"), - "verifiedregistry": MustParseCid("bafk2bzacedej3dnr62g2je2abmyjg3xqv4otvh6e26du5fcrhvw7zgcaaez3a"), + "reward": MustParseCid("bafk2bzacealqnxn5lwzwexd6reav4dppypquklx2ujlnvaxiqk2tzstyvkp5u"), + "storagemarket": MustParseCid("bafk2bzacedylkg5am446lcuih4voyzdn4yjeqfsxfzh5b6mcuhx4mok5ph5c4"), + "storageminer": MustParseCid("bafk2bzacedio7wfaqutc4w6gl2dwqkgpcatz2r223ms74zxiovbjrfxmzeiou"), + "storagepower": MustParseCid("bafk2bzacecsij5tpfzjpfuckxvccv2p3bdqjklkrfyyoei6lx5dyj5j4fvjm6"), + "system": MustParseCid("bafk2bzacebfqrja2hip7esf4eafxjmu6xcogoqu5xxtgdg7xa5szgvvdguchu"), + "verifiedregistry": MustParseCid("bafk2bzacedudgflxc75c77c6zkmfyq4u2xuk7k6xw6dfdccarjrvxx453b77q"), }, }, { 
Network: "testing", @@ -659,25 +659,25 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet }, { Network: "testing", Version: 12, - BundleGitTag: "v11.0.0", - ManifestCid: MustParseCid("bafy2bzacea2vxre32tg3xhpejrktiuzx4d3pcoe7yyazgscfibmegmchr6n42"), + BundleGitTag: "v12.0.0-rc.2", + ManifestCid: MustParseCid("bafy2bzacec7artlmtdmfuhurd2i7dgturiveblruub5xh3yizjtp5qzye3dly"), Actors: map[string]cid.Cid{ - "account": MustParseCid("bafk2bzaceccerssb3tgel6ukdghlwvs7dxsolj4fpkgn7dh7owzwapqb6ejpw"), - "cron": MustParseCid("bafk2bzacebtfl6fczxnitrqqjhyefskf3asyn3gzuvqcddieoqfsaddk5fd4q"), - "datacap": MustParseCid("bafk2bzacediikc55y6uzmnhucf4mik6rqpjulwvgp5gdibogxjhgbvusmzff2"), - "eam": MustParseCid("bafk2bzaceazqi5ezossp6kvqogaaba6hxlfarqgniktmb7iy5qonha3eflz6m"), - "ethaccount": MustParseCid("bafk2bzaceb77ospgfqqmf67v23wkyeg7lr2mu53ybaacu3bslx7s7nhttdueo"), - "evm": MustParseCid("bafk2bzacedvgt7mv22hux4vrnklylq7qmw43kfrqwam6wdsfzkdnaewr33qbu"), - "init": MustParseCid("bafk2bzacealzb3nk2oypway5ubz3hs5py5ok5tuw545454vg4d3mwbslef4js"), - "multisig": MustParseCid("bafk2bzacec45ppn4hrwizmopp2v2atkxw35tb6yem6uqhqilrv7aiaknnnxmu"), - "paymentchannel": MustParseCid("bafk2bzaceajbr3t6cngzh3katqteflbcrtwtdgbthnlfemon5tg6rytf2uonw"), + "account": MustParseCid("bafk2bzacea74qqkfvacykmq5emzqblh4f4nmxdkiyixxpzs7kkcfnbfa7cb6m"), + "cron": MustParseCid("bafk2bzacecotbu7k6awdzfzakf7g5iaas6gswtunjnnb2xm2klqoshjgb4imy"), + "datacap": MustParseCid("bafk2bzaceduhmqcyailiwdupt2ottfzh5hcrjoyeyiaipf3idk3mu7y3uz2mc"), + "eam": MustParseCid("bafk2bzaceb2yzzw6dcmcmhnt3mqnm4kah66f23pc4escnto3vwa552t6ctr7i"), + "ethaccount": MustParseCid("bafk2bzacebwkvvbmttkcjjlicp4ineozc52i5sc6d46pcoq6lzzs2p5i2youa"), + "evm": MustParseCid("bafk2bzacedetwacs6wmoksxwjlbpp4442uav7fd3pagadejm2cph7ucym7eck"), + "init": MustParseCid("bafk2bzacedhpoycn4sz7dragmbo5yqjspqriydxhplqdeguaqck2hmq5hgwqg"), + "multisig": 
MustParseCid("bafk2bzaceacc3m23yvnpzoeekstqtr2acutfv4zvsgncorjdrsucymjohzxs4"), + "paymentchannel": MustParseCid("bafk2bzaceac6i76vfexefqf6qgebkhkf2cb4g664d5nmfh2dric5spgykevd2"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), - "reward": MustParseCid("bafk2bzacea7ycf53kbq4robcuh3ziy7qwwhaqamc5krn3lugypgpxhlewdaiq"), - "storagemarket": MustParseCid("bafk2bzacedskmbcpaeb6bezs32szh52jrukvihobluadauayroo5gzrt32tkm"), - "storageminer": MustParseCid("bafk2bzaced3yg5lctmswnbkxyd6cleg3llyux7fu2vbddyd2ho36fpym423mq"), - "storagepower": MustParseCid("bafk2bzacebvpdf372fzxgixztbz2r7ayxyvx7jmdxwlfuqt2cq7tnqgie3klw"), - "system": MustParseCid("bafk2bzaceaatvscbnkv36ixhtt2zel4er5oskxevgumh5gegqkv7uzah36f24"), - "verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"), + "reward": MustParseCid("bafk2bzaceaajyncjxcrl7wbb6ukzkueyqz4uyekxpmtn4mpndkf7ksmggopzq"), + "storagemarket": MustParseCid("bafk2bzaced6cexdcinwjhtfvuxgkxukiejp3goylaxtvhqfd24rs5z7g2z7dm"), + "storageminer": MustParseCid("bafk2bzaceclcbtu6edh73cn7ga7kzkmxcfnjt7jnxolkc5id6l6f7sztevm24"), + "storagepower": MustParseCid("bafk2bzacedexrf5qplrrl5xzijfrthjdqwodfs5e6zj5kpztc7qnywbqdyiii"), + "system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"), + "verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"), }, }, { Network: "testing-fake-proofs", @@ -765,24 +765,24 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet }, { Network: "testing-fake-proofs", Version: 12, - BundleGitTag: "v11.0.0", - ManifestCid: MustParseCid("bafy2bzacecojemqglhzzhjnhgtrcbsgkyv67ziytvtbhwlr4ym4oxqofv7zui"), + BundleGitTag: "v12.0.0-rc.2", + ManifestCid: MustParseCid("bafy2bzaceand7owcb3omq7zjwir2q3lqapprl2eyiwpzv6oqs5bok4vzzqjbu"), Actors: map[string]cid.Cid{ - "account": MustParseCid("bafk2bzaceccerssb3tgel6ukdghlwvs7dxsolj4fpkgn7dh7owzwapqb6ejpw"), 
- "cron": MustParseCid("bafk2bzacebtfl6fczxnitrqqjhyefskf3asyn3gzuvqcddieoqfsaddk5fd4q"), - "datacap": MustParseCid("bafk2bzacediikc55y6uzmnhucf4mik6rqpjulwvgp5gdibogxjhgbvusmzff2"), - "eam": MustParseCid("bafk2bzaceazqi5ezossp6kvqogaaba6hxlfarqgniktmb7iy5qonha3eflz6m"), - "ethaccount": MustParseCid("bafk2bzaceb77ospgfqqmf67v23wkyeg7lr2mu53ybaacu3bslx7s7nhttdueo"), - "evm": MustParseCid("bafk2bzacedvgt7mv22hux4vrnklylq7qmw43kfrqwam6wdsfzkdnaewr33qbu"), - "init": MustParseCid("bafk2bzacealzb3nk2oypway5ubz3hs5py5ok5tuw545454vg4d3mwbslef4js"), - "multisig": MustParseCid("bafk2bzacec45ppn4hrwizmopp2v2atkxw35tb6yem6uqhqilrv7aiaknnnxmu"), - "paymentchannel": MustParseCid("bafk2bzaceajbr3t6cngzh3katqteflbcrtwtdgbthnlfemon5tg6rytf2uonw"), + "account": MustParseCid("bafk2bzacea74qqkfvacykmq5emzqblh4f4nmxdkiyixxpzs7kkcfnbfa7cb6m"), + "cron": MustParseCid("bafk2bzacecotbu7k6awdzfzakf7g5iaas6gswtunjnnb2xm2klqoshjgb4imy"), + "datacap": MustParseCid("bafk2bzaceduhmqcyailiwdupt2ottfzh5hcrjoyeyiaipf3idk3mu7y3uz2mc"), + "eam": MustParseCid("bafk2bzaceb2yzzw6dcmcmhnt3mqnm4kah66f23pc4escnto3vwa552t6ctr7i"), + "ethaccount": MustParseCid("bafk2bzacebwkvvbmttkcjjlicp4ineozc52i5sc6d46pcoq6lzzs2p5i2youa"), + "evm": MustParseCid("bafk2bzacedetwacs6wmoksxwjlbpp4442uav7fd3pagadejm2cph7ucym7eck"), + "init": MustParseCid("bafk2bzacedhpoycn4sz7dragmbo5yqjspqriydxhplqdeguaqck2hmq5hgwqg"), + "multisig": MustParseCid("bafk2bzaceacc3m23yvnpzoeekstqtr2acutfv4zvsgncorjdrsucymjohzxs4"), + "paymentchannel": MustParseCid("bafk2bzaceac6i76vfexefqf6qgebkhkf2cb4g664d5nmfh2dric5spgykevd2"), "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), - "reward": MustParseCid("bafk2bzacea7ycf53kbq4robcuh3ziy7qwwhaqamc5krn3lugypgpxhlewdaiq"), - "storagemarket": MustParseCid("bafk2bzacedskmbcpaeb6bezs32szh52jrukvihobluadauayroo5gzrt32tkm"), - "storageminer": MustParseCid("bafk2bzacebqeztpa5exztccqjwqhan5droiy7ga6zekm6f2gzxoe655vneczm"), - "storagepower": 
MustParseCid("bafk2bzaceb2tlyuwxncdxsh3hc4fwcjnpxaijkiv54ustwdjbrqabxdsc27km"), - "system": MustParseCid("bafk2bzaceaatvscbnkv36ixhtt2zel4er5oskxevgumh5gegqkv7uzah36f24"), - "verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"), + "reward": MustParseCid("bafk2bzaceaajyncjxcrl7wbb6ukzkueyqz4uyekxpmtn4mpndkf7ksmggopzq"), + "storagemarket": MustParseCid("bafk2bzaced6cexdcinwjhtfvuxgkxukiejp3goylaxtvhqfd24rs5z7g2z7dm"), + "storageminer": MustParseCid("bafk2bzacebbusvtnyhi7mimbnx2tw2uyvrr3fbmncbf6frx6e4kvjvoqllu36"), + "storagepower": MustParseCid("bafk2bzacecdwijcbbryinjtm27pdinqqkyzoskri24pwsvsadwcq2alkkjpnc"), + "system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"), + "verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"), }, }} diff --git a/build/genesis/butterflynet.car b/build/genesis/butterflynet.car index 30ec609ec..962964c35 100644 Binary files a/build/genesis/butterflynet.car and b/build/genesis/butterflynet.car differ diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index d0d6cc1ce..9bcb54b58 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/gateway.json.gz b/build/openrpc/gateway.json.gz index 8860d7071..e0c6c61b8 100644 Binary files a/build/openrpc/gateway.json.gz and b/build/openrpc/gateway.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index 3696821da..817595c7b 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index 207ba656e..6adce6cb9 100644 Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ diff --git a/build/params_2k.go b/build/params_2k.go index 5e0f5c60d..df4f3dece 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -67,6 +67,9 @@ var 
UpgradeThunderHeight = abi.ChainEpoch(-23) var UpgradeWatermelonHeight = abi.ChainEpoch(200) +// This fix upgrade only ran on calibrationnet +const UpgradeWatermelonFixHeight = -100 + var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, } diff --git a/build/params_butterfly.go b/build/params_butterfly.go index 7d3b613c9..31153dffe 100644 --- a/build/params_butterfly.go +++ b/build/params_butterfly.go @@ -19,7 +19,7 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, } -const GenesisNetworkVersion = network.Version18 +const GenesisNetworkVersion = network.Version20 var NetworkBundle = "butterflynet" var BundleOverrides map[actorstypes.Version]string @@ -52,10 +52,13 @@ const UpgradeOhSnapHeight = -18 const UpgradeSkyrHeight = -19 const UpgradeSharkHeight = -20 const UpgradeHyggeHeight = -21 +const UpgradeLightningHeight = -22 +const UpgradeThunderHeight = -23 -const UpgradeLightningHeight = 50 +const UpgradeWatermelonHeight = 400 -const UpgradeThunderHeight = UpgradeLightningHeight + 360 +// This fix upgrade only ran on calibrationnet +const UpgradeWatermelonFixHeight = -100 // ?????????? const UpgradeWatermelonHeight = 999999999999999 diff --git a/build/params_calibnet.go b/build/params_calibnet.go index 4081d4c1d..2cc53925c 100644 --- a/build/params_calibnet.go +++ b/build/params_calibnet.go @@ -79,8 +79,11 @@ const UpgradeLightningHeight = 489094 // 2023-04-21T16:00:00Z const UpgradeThunderHeight = UpgradeLightningHeight + 3120 -// ?????????? 
-const UpgradeWatermelonHeight = 999999999999999 +// 2023-10-19T13:00:00Z +const UpgradeWatermelonHeight = 1013134 + +// 2023-11-07T13:00:00Z +const UpgradeWatermelonFixHeight = 1070494 var SupportedProofTypes = []abi.RegisteredSealProof{ abi.RegisteredSealProof_StackedDrg32GiBV1, diff --git a/build/params_interop.go b/build/params_interop.go index 2b2f36160..104ae83c8 100644 --- a/build/params_interop.go +++ b/build/params_interop.go @@ -56,6 +56,9 @@ var UpgradeThunderHeight = abi.ChainEpoch(-23) const UpgradeWatermelonHeight = 50 +// This fix upgrade only ran on calibrationnet +const UpgradeWatermelonFixHeight = -1 + var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, } diff --git a/build/params_mainnet.go b/build/params_mainnet.go index f35ac45ef..c607bcdd0 100644 --- a/build/params_mainnet.go +++ b/build/params_mainnet.go @@ -95,8 +95,11 @@ const UpgradeLightningHeight = 2809800 // 2023-05-18T13:00:00Z const UpgradeThunderHeight = UpgradeLightningHeight + 2880*21 -// ??????? 
-var UpgradeWatermelonHeight = abi.ChainEpoch(9999999999) +// 2023-11-29T13:30:00Z +var UpgradeWatermelonHeight = abi.ChainEpoch(3431940) + +// This fix upgrade only ran on calibrationnet +const UpgradeWatermelonFixHeight = -1 var SupportedProofTypes = []abi.RegisteredSealProof{ abi.RegisteredSealProof_StackedDrg32GiBV1, diff --git a/build/params_testground.go b/build/params_testground.go index 46d2e16de..8073b0c6e 100644 --- a/build/params_testground.go +++ b/build/params_testground.go @@ -87,29 +87,30 @@ var ( UpgradeBreezeHeight abi.ChainEpoch = -1 BreezeGasTampingDuration abi.ChainEpoch = 0 - UpgradeSmokeHeight abi.ChainEpoch = -1 - UpgradeIgnitionHeight abi.ChainEpoch = -2 - UpgradeRefuelHeight abi.ChainEpoch = -3 - UpgradeTapeHeight abi.ChainEpoch = -4 - UpgradeAssemblyHeight abi.ChainEpoch = 10 - UpgradeLiftoffHeight abi.ChainEpoch = -5 - UpgradeKumquatHeight abi.ChainEpoch = -6 - UpgradeCalicoHeight abi.ChainEpoch = -8 - UpgradePersianHeight abi.ChainEpoch = -9 - UpgradeOrangeHeight abi.ChainEpoch = -10 - UpgradeClausHeight abi.ChainEpoch = -11 - UpgradeTrustHeight abi.ChainEpoch = -12 - UpgradeNorwegianHeight abi.ChainEpoch = -13 - UpgradeTurboHeight abi.ChainEpoch = -14 - UpgradeHyperdriveHeight abi.ChainEpoch = -15 - UpgradeChocolateHeight abi.ChainEpoch = -16 - UpgradeOhSnapHeight abi.ChainEpoch = -17 - UpgradeSkyrHeight abi.ChainEpoch = -18 - UpgradeSharkHeight abi.ChainEpoch = -19 - UpgradeHyggeHeight abi.ChainEpoch = -20 - UpgradeLightningHeight abi.ChainEpoch = -21 - UpgradeThunderHeight abi.ChainEpoch = -22 - UpgradeWatermelonHeight abi.ChainEpoch = -23 + UpgradeSmokeHeight abi.ChainEpoch = -1 + UpgradeIgnitionHeight abi.ChainEpoch = -2 + UpgradeRefuelHeight abi.ChainEpoch = -3 + UpgradeTapeHeight abi.ChainEpoch = -4 + UpgradeAssemblyHeight abi.ChainEpoch = 10 + UpgradeLiftoffHeight abi.ChainEpoch = -5 + UpgradeKumquatHeight abi.ChainEpoch = -6 + UpgradeCalicoHeight abi.ChainEpoch = -8 + UpgradePersianHeight abi.ChainEpoch = -9 + 
UpgradeOrangeHeight abi.ChainEpoch = -10 + UpgradeClausHeight abi.ChainEpoch = -11 + UpgradeTrustHeight abi.ChainEpoch = -12 + UpgradeNorwegianHeight abi.ChainEpoch = -13 + UpgradeTurboHeight abi.ChainEpoch = -14 + UpgradeHyperdriveHeight abi.ChainEpoch = -15 + UpgradeChocolateHeight abi.ChainEpoch = -16 + UpgradeOhSnapHeight abi.ChainEpoch = -17 + UpgradeSkyrHeight abi.ChainEpoch = -18 + UpgradeSharkHeight abi.ChainEpoch = -19 + UpgradeHyggeHeight abi.ChainEpoch = -20 + UpgradeLightningHeight abi.ChainEpoch = -21 + UpgradeThunderHeight abi.ChainEpoch = -22 + UpgradeWatermelonHeight abi.ChainEpoch = -23 + UpgradeWatermelonFixHeight abi.ChainEpoch = -24 DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, diff --git a/chain/actors/builtin/builtin.go b/chain/actors/builtin/builtin.go index 414a11e72..4516683a8 100644 --- a/chain/actors/builtin/builtin.go +++ b/chain/actors/builtin/builtin.go @@ -23,6 +23,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors" ) +var InitActorAddr = builtin.InitActorAddr var SystemActorAddr = builtin.SystemActorAddr var BurntFundsActorAddr = builtin.BurntFundsActorAddr var CronActorAddr = builtin.CronActorAddr diff --git a/chain/actors/builtin/builtin.go.template b/chain/actors/builtin/builtin.go.template index 3b737c47e..3d08a12bf 100644 --- a/chain/actors/builtin/builtin.go.template +++ b/chain/actors/builtin/builtin.go.template @@ -23,6 +23,7 @@ import ( smoothingtypes "github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing" ) +var InitActorAddr = builtin.InitActorAddr var SystemActorAddr = builtin.SystemActorAddr var BurntFundsActorAddr = builtin.BurntFundsActorAddr var CronActorAddr = builtin.CronActorAddr diff --git a/chain/actors/builtin/market/state.go.template b/chain/actors/builtin/market/state.go.template index bbaa5c775..1eab9d743 100644 --- a/chain/actors/builtin/market/state.go.template +++ b/chain/actors/builtin/market/state.go.template @@ -19,7 +19,7 @@ import ( 
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/types" - verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/go-state-types/manifest" {{if (le .v 7)}} diff --git a/chain/actors/builtin/market/v0.go b/chain/actors/builtin/market/v0.go index c0a628b47..ca6970dfa 100644 --- a/chain/actors/builtin/market/v0.go +++ b/chain/actors/builtin/market/v0.go @@ -11,13 +11,13 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/manifest" market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" ) diff --git a/chain/actors/builtin/market/v10.go b/chain/actors/builtin/market/v10.go index aaa0ee0f1..878f0d465 100644 --- a/chain/actors/builtin/market/v10.go +++ b/chain/actors/builtin/market/v10.go @@ -17,11 +17,11 @@ import ( market10 "github.com/filecoin-project/go-state-types/builtin/v10/market" adt10 "github.com/filecoin-project/go-state-types/builtin/v10/util/adt" markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" - verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + verifregtypes 
"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" ) diff --git a/chain/actors/builtin/market/v11.go b/chain/actors/builtin/market/v11.go index a816e3409..a64272209 100644 --- a/chain/actors/builtin/market/v11.go +++ b/chain/actors/builtin/market/v11.go @@ -17,11 +17,11 @@ import ( market11 "github.com/filecoin-project/go-state-types/builtin/v11/market" adt11 "github.com/filecoin-project/go-state-types/builtin/v11/util/adt" markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" - verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" ) diff --git a/chain/actors/builtin/market/v12.go b/chain/actors/builtin/market/v12.go index 3532fc4f4..56e651a9b 100644 --- a/chain/actors/builtin/market/v12.go +++ b/chain/actors/builtin/market/v12.go @@ -17,11 +17,11 @@ import ( market12 "github.com/filecoin-project/go-state-types/builtin/v12/market" adt12 "github.com/filecoin-project/go-state-types/builtin/v12/util/adt" markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" - verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" ) diff --git a/chain/actors/builtin/market/v2.go b/chain/actors/builtin/market/v2.go index 89ffdde8f..ba84e3b03 100644 --- a/chain/actors/builtin/market/v2.go +++ b/chain/actors/builtin/market/v2.go @@ -11,13 +11,13 @@ import ( 
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/manifest" market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" ) diff --git a/chain/actors/builtin/market/v3.go b/chain/actors/builtin/market/v3.go index f4d073ed8..f6a0891e7 100644 --- a/chain/actors/builtin/market/v3.go +++ b/chain/actors/builtin/market/v3.go @@ -11,13 +11,13 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/manifest" market3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/market" adt3 "github.com/filecoin-project/specs-actors/v3/actors/util/adt" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" ) diff --git a/chain/actors/builtin/market/v4.go b/chain/actors/builtin/market/v4.go index 422a30cbb..629e833b6 100644 --- a/chain/actors/builtin/market/v4.go +++ b/chain/actors/builtin/market/v4.go @@ -11,13 +11,13 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - verifregtypes 
"github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/manifest" market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market" adt4 "github.com/filecoin-project/specs-actors/v4/actors/util/adt" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" ) diff --git a/chain/actors/builtin/market/v5.go b/chain/actors/builtin/market/v5.go index b30decb03..892588979 100644 --- a/chain/actors/builtin/market/v5.go +++ b/chain/actors/builtin/market/v5.go @@ -11,13 +11,13 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/manifest" market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market" adt5 "github.com/filecoin-project/specs-actors/v5/actors/util/adt" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" ) diff --git a/chain/actors/builtin/market/v6.go b/chain/actors/builtin/market/v6.go index 377b278ae..b57d49f91 100644 --- a/chain/actors/builtin/market/v6.go +++ b/chain/actors/builtin/market/v6.go @@ -13,13 +13,13 @@ import ( rlepluslazy "github.com/filecoin-project/go-bitfield/rle" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/manifest" market6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/market" 
adt6 "github.com/filecoin-project/specs-actors/v6/actors/util/adt" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" ) diff --git a/chain/actors/builtin/market/v7.go b/chain/actors/builtin/market/v7.go index cd4607cbe..56a1db328 100644 --- a/chain/actors/builtin/market/v7.go +++ b/chain/actors/builtin/market/v7.go @@ -13,13 +13,13 @@ import ( rlepluslazy "github.com/filecoin-project/go-bitfield/rle" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/manifest" market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market" adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" ) diff --git a/chain/actors/builtin/market/v8.go b/chain/actors/builtin/market/v8.go index 5cce06d3a..9c68ee1fd 100644 --- a/chain/actors/builtin/market/v8.go +++ b/chain/actors/builtin/market/v8.go @@ -16,11 +16,11 @@ import ( market8 "github.com/filecoin-project/go-state-types/builtin/v8/market" adt8 "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" - verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" 
"github.com/filecoin-project/lotus/chain/types" ) diff --git a/chain/actors/builtin/market/v9.go b/chain/actors/builtin/market/v9.go index 095c20850..d692c15cc 100644 --- a/chain/actors/builtin/market/v9.go +++ b/chain/actors/builtin/market/v9.go @@ -17,11 +17,11 @@ import ( market9 "github.com/filecoin-project/go-state-types/builtin/v9/market" markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" adt9 "github.com/filecoin-project/go-state-types/builtin/v9/util/adt" - verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" ) diff --git a/chain/actors/builtin/miner/actor.go.template b/chain/actors/builtin/miner/actor.go.template index 7319ee9c5..b4ad3a4b5 100644 --- a/chain/actors/builtin/miner/actor.go.template +++ b/chain/actors/builtin/miner/actor.go.template @@ -154,7 +154,7 @@ type Partition interface { type SectorOnChainInfo = minertypes.SectorOnChainInfo -func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { +func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof, configWantSynthetic bool) (abi.RegisteredSealProof, error) { // We added support for the new proofs in network version 7, and removed support for the old // ones in network version 8. 
if nver < network.Version7 { @@ -174,17 +174,34 @@ func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.Re } } + if nver < MinSyntheticPoRepVersion || !configWantSynthetic { + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1: + return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, abi.RegisteredPoStProof_StackedDrgWindow8MiBV1_1: + return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, abi.RegisteredPoStProof_StackedDrgWindow512MiBV1_1: + return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, abi.RegisteredPoStProof_StackedDrgWindow32GiBV1_1: + return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1_1: + return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil + default: + return -1, xerrors.Errorf("unrecognized window post type: %d", proof) + } + } + switch proof { case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1: - return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil + return abi.RegisteredSealProof_StackedDrg2KiBV1_1_Feat_SyntheticPoRep, nil case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, abi.RegisteredPoStProof_StackedDrgWindow8MiBV1_1: - return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil + return abi.RegisteredSealProof_StackedDrg8MiBV1_1_Feat_SyntheticPoRep, nil case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, abi.RegisteredPoStProof_StackedDrgWindow512MiBV1_1: - return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil + return abi.RegisteredSealProof_StackedDrg512MiBV1_1_Feat_SyntheticPoRep, nil case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, abi.RegisteredPoStProof_StackedDrgWindow32GiBV1_1: - return 
abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil + return abi.RegisteredSealProof_StackedDrg32GiBV1_1_Feat_SyntheticPoRep, nil case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1_1: - return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil + return abi.RegisteredSealProof_StackedDrg64GiBV1_1_Feat_SyntheticPoRep, nil default: return -1, xerrors.Errorf("unrecognized window post type: %d", proof) } @@ -213,8 +230,37 @@ type PendingBeneficiaryChange = minertypes.PendingBeneficiaryChange type WorkerKeyChange = minertypes.WorkerKeyChange type SectorPreCommitOnChainInfo = minertypes.SectorPreCommitOnChainInfo type SectorPreCommitInfo = minertypes.SectorPreCommitInfo +type SubmitWindowedPoStParams = minertypes.SubmitWindowedPoStParams +type PoStPartition = minertypes.PoStPartition +type RecoveryDeclaration = minertypes.RecoveryDeclaration +type FaultDeclaration = minertypes.FaultDeclaration +type DeclareFaultsRecoveredParams = minertypes.DeclareFaultsRecoveredParams +type DeclareFaultsParams = minertypes.DeclareFaultsParams +type ProveCommitAggregateParams = minertypes.ProveCommitAggregateParams +type ProveCommitSectorParams = minertypes.ProveCommitSectorParams +type ProveReplicaUpdatesParams = minertypes.ProveReplicaUpdatesParams +type ReplicaUpdate = minertypes.ReplicaUpdate +type PreCommitSectorBatchParams = minertypes.PreCommitSectorBatchParams +type PreCommitSectorBatchParams2 = minertypes.PreCommitSectorBatchParams2 +type ExtendSectorExpiration2Params = minertypes.ExtendSectorExpiration2Params +type SectorClaim = minertypes.SectorClaim +type ExpirationExtension2 = minertypes.ExpirationExtension2 +type CompactPartitionsParams = minertypes.CompactPartitionsParams +type WithdrawBalanceParams = minertypes.WithdrawBalanceParams + +var QAPowerMax = minertypes.QAPowerMax + type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo +var WPoStProvingPeriod = func() abi.ChainEpoch { return minertypes.WPoStProvingPeriod } +var 
WPoStChallengeWindow = func() abi.ChainEpoch { return minertypes.WPoStChallengeWindow } + +const WPoStPeriodDeadlines = minertypes.WPoStPeriodDeadlines +const WPoStChallengeLookback = minertypes.WPoStChallengeLookback +const FaultDeclarationCutoff = minertypes.FaultDeclarationCutoff +const MinAggregatedSectors = minertypes.MinAggregatedSectors +const MinSectorExpiration = minertypes.MinSectorExpiration + type SectorExpiration struct { OnTime abi.ChainEpoch diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go index 3ad17b033..1637cdade 100644 --- a/chain/actors/builtin/miner/miner.go +++ b/chain/actors/builtin/miner/miner.go @@ -214,7 +214,7 @@ type Partition interface { type SectorOnChainInfo = minertypes.SectorOnChainInfo -func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { +func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof, configWantSynthetic bool) (abi.RegisteredSealProof, error) { // We added support for the new proofs in network version 7, and removed support for the old // ones in network version 8. 
if nver < network.Version7 { @@ -234,17 +234,34 @@ func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.Re } } + if nver < MinSyntheticPoRepVersion || !configWantSynthetic { + switch proof { + case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1: + return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, abi.RegisteredPoStProof_StackedDrgWindow8MiBV1_1: + return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, abi.RegisteredPoStProof_StackedDrgWindow512MiBV1_1: + return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, abi.RegisteredPoStProof_StackedDrgWindow32GiBV1_1: + return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil + case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1_1: + return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil + default: + return -1, xerrors.Errorf("unrecognized window post type: %d", proof) + } + } + switch proof { case abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, abi.RegisteredPoStProof_StackedDrgWindow2KiBV1_1: - return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil + return abi.RegisteredSealProof_StackedDrg2KiBV1_1_Feat_SyntheticPoRep, nil case abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, abi.RegisteredPoStProof_StackedDrgWindow8MiBV1_1: - return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil + return abi.RegisteredSealProof_StackedDrg8MiBV1_1_Feat_SyntheticPoRep, nil case abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, abi.RegisteredPoStProof_StackedDrgWindow512MiBV1_1: - return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil + return abi.RegisteredSealProof_StackedDrg512MiBV1_1_Feat_SyntheticPoRep, nil case abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, abi.RegisteredPoStProof_StackedDrgWindow32GiBV1_1: - return 
abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil + return abi.RegisteredSealProof_StackedDrg32GiBV1_1_Feat_SyntheticPoRep, nil case abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1_1: - return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil + return abi.RegisteredSealProof_StackedDrg64GiBV1_1_Feat_SyntheticPoRep, nil default: return -1, xerrors.Errorf("unrecognized window post type: %d", proof) } @@ -273,8 +290,37 @@ type PendingBeneficiaryChange = minertypes.PendingBeneficiaryChange type WorkerKeyChange = minertypes.WorkerKeyChange type SectorPreCommitOnChainInfo = minertypes.SectorPreCommitOnChainInfo type SectorPreCommitInfo = minertypes.SectorPreCommitInfo +type SubmitWindowedPoStParams = minertypes.SubmitWindowedPoStParams +type PoStPartition = minertypes.PoStPartition +type RecoveryDeclaration = minertypes.RecoveryDeclaration +type FaultDeclaration = minertypes.FaultDeclaration +type DeclareFaultsRecoveredParams = minertypes.DeclareFaultsRecoveredParams +type DeclareFaultsParams = minertypes.DeclareFaultsParams +type ProveCommitAggregateParams = minertypes.ProveCommitAggregateParams +type ProveCommitSectorParams = minertypes.ProveCommitSectorParams +type ProveReplicaUpdatesParams = minertypes.ProveReplicaUpdatesParams +type ReplicaUpdate = minertypes.ReplicaUpdate +type PreCommitSectorBatchParams = minertypes.PreCommitSectorBatchParams +type PreCommitSectorBatchParams2 = minertypes.PreCommitSectorBatchParams2 +type ExtendSectorExpiration2Params = minertypes.ExtendSectorExpiration2Params +type SectorClaim = minertypes.SectorClaim +type ExpirationExtension2 = minertypes.ExpirationExtension2 +type CompactPartitionsParams = minertypes.CompactPartitionsParams +type WithdrawBalanceParams = minertypes.WithdrawBalanceParams + +var QAPowerMax = minertypes.QAPowerMax + type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo +var WPoStProvingPeriod = func() abi.ChainEpoch { return minertypes.WPoStProvingPeriod } +var 
WPoStChallengeWindow = func() abi.ChainEpoch { return minertypes.WPoStChallengeWindow } + +const WPoStPeriodDeadlines = minertypes.WPoStPeriodDeadlines +const WPoStChallengeLookback = minertypes.WPoStChallengeLookback +const FaultDeclarationCutoff = minertypes.FaultDeclarationCutoff +const MinAggregatedSectors = minertypes.MinAggregatedSectors +const MinSectorExpiration = minertypes.MinSectorExpiration + type SectorExpiration struct { OnTime abi.ChainEpoch diff --git a/chain/actors/builtin/miner/state.go.template b/chain/actors/builtin/miner/state.go.template index b322b2283..3c7f05d9a 100644 --- a/chain/actors/builtin/miner/state.go.template +++ b/chain/actors/builtin/miner/state.go.template @@ -72,7 +72,7 @@ func (s *state{{.v}}) AvailableBalance(bal abi.TokenAmount) (available abi.Token available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available{{if (ge .v 2)}}, err{{end}} = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/utils.go b/chain/actors/builtin/miner/utils.go index dae3d3bc2..072eab986 100644 --- a/chain/actors/builtin/miner/utils.go +++ b/chain/actors/builtin/miner/utils.go @@ -8,6 +8,8 @@ import ( "github.com/filecoin-project/go-state-types/network" ) +var MinSyntheticPoRepVersion = network.Version21 + func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) (bitfield.BitField, error) { var parts []bitfield.BitField @@ -31,7 +33,7 @@ func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) // SealProofTypeFromSectorSize returns preferred seal proof type for creating // new miner actors and new sectors -func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.RegisteredSealProof, error) { +func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version, synthetic bool) 
(abi.RegisteredSealProof, error) { switch { case nv < network.Version7: switch ssize { @@ -49,25 +51,49 @@ func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi. return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) } case nv >= network.Version7: + var v abi.RegisteredSealProof switch ssize { case 2 << 10: - return abi.RegisteredSealProof_StackedDrg2KiBV1_1, nil + v = abi.RegisteredSealProof_StackedDrg2KiBV1_1 case 8 << 20: - return abi.RegisteredSealProof_StackedDrg8MiBV1_1, nil + v = abi.RegisteredSealProof_StackedDrg8MiBV1_1 case 512 << 20: - return abi.RegisteredSealProof_StackedDrg512MiBV1_1, nil + v = abi.RegisteredSealProof_StackedDrg512MiBV1_1 case 32 << 30: - return abi.RegisteredSealProof_StackedDrg32GiBV1_1, nil + v = abi.RegisteredSealProof_StackedDrg32GiBV1_1 case 64 << 30: - return abi.RegisteredSealProof_StackedDrg64GiBV1_1, nil + v = abi.RegisteredSealProof_StackedDrg64GiBV1_1 default: return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) } + + if nv >= MinSyntheticPoRepVersion && synthetic { + return toSynthetic(v) + } else { + return v, nil + } } return 0, xerrors.Errorf("unsupported network version") } +func toSynthetic(in abi.RegisteredSealProof) (abi.RegisteredSealProof, error) { + switch in { + case abi.RegisteredSealProof_StackedDrg2KiBV1_1: + return abi.RegisteredSealProof_StackedDrg2KiBV1_1_Feat_SyntheticPoRep, nil + case abi.RegisteredSealProof_StackedDrg8MiBV1_1: + return abi.RegisteredSealProof_StackedDrg8MiBV1_1_Feat_SyntheticPoRep, nil + case abi.RegisteredSealProof_StackedDrg512MiBV1_1: + return abi.RegisteredSealProof_StackedDrg512MiBV1_1_Feat_SyntheticPoRep, nil + case abi.RegisteredSealProof_StackedDrg32GiBV1_1: + return abi.RegisteredSealProof_StackedDrg32GiBV1_1_Feat_SyntheticPoRep, nil + case abi.RegisteredSealProof_StackedDrg64GiBV1_1: + return abi.RegisteredSealProof_StackedDrg64GiBV1_1_Feat_SyntheticPoRep, nil + default: + return 0, xerrors.Errorf("unsupported 
conversion to synthetic: %v", in) + } +} + // WindowPoStProofTypeFromSectorSize returns preferred post proof type for creating // new miner actors and new sectors func WindowPoStProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi.RegisteredPoStProof, error) { diff --git a/chain/actors/builtin/miner/v0.go b/chain/actors/builtin/miner/v0.go index 7d5eaf8e0..5301ed1cb 100644 --- a/chain/actors/builtin/miner/v0.go +++ b/chain/actors/builtin/miner/v0.go @@ -62,7 +62,7 @@ func (s *state0) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v10.go b/chain/actors/builtin/miner/v10.go index 4d47ba396..53dc90b45 100644 --- a/chain/actors/builtin/miner/v10.go +++ b/chain/actors/builtin/miner/v10.go @@ -62,7 +62,7 @@ func (s *state10) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmou available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v11.go b/chain/actors/builtin/miner/v11.go index a3ffd606f..11a91c26b 100644 --- a/chain/actors/builtin/miner/v11.go +++ b/chain/actors/builtin/miner/v11.go @@ -62,7 +62,7 @@ func (s *state11) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmou available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git 
a/chain/actors/builtin/miner/v12.go b/chain/actors/builtin/miner/v12.go index 787da7d0f..90ecc97fd 100644 --- a/chain/actors/builtin/miner/v12.go +++ b/chain/actors/builtin/miner/v12.go @@ -62,7 +62,7 @@ func (s *state12) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmou available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v2.go b/chain/actors/builtin/miner/v2.go index 14341ae38..5a81ad31f 100644 --- a/chain/actors/builtin/miner/v2.go +++ b/chain/actors/builtin/miner/v2.go @@ -61,7 +61,7 @@ func (s *state2) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v3.go b/chain/actors/builtin/miner/v3.go index 52808da8c..aa1574cf4 100644 --- a/chain/actors/builtin/miner/v3.go +++ b/chain/actors/builtin/miner/v3.go @@ -62,7 +62,7 @@ func (s *state3) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v4.go b/chain/actors/builtin/miner/v4.go index 5980ef769..1faf30c09 100644 --- a/chain/actors/builtin/miner/v4.go +++ b/chain/actors/builtin/miner/v4.go @@ -62,7 +62,7 @@ func (s *state4) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun 
available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v5.go b/chain/actors/builtin/miner/v5.go index 886300ea3..be4b5e0b2 100644 --- a/chain/actors/builtin/miner/v5.go +++ b/chain/actors/builtin/miner/v5.go @@ -62,7 +62,7 @@ func (s *state5) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v6.go b/chain/actors/builtin/miner/v6.go index 4737b0ee2..fa8c30e40 100644 --- a/chain/actors/builtin/miner/v6.go +++ b/chain/actors/builtin/miner/v6.go @@ -62,7 +62,7 @@ func (s *state6) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v7.go b/chain/actors/builtin/miner/v7.go index 72803eb75..d6bb0e16e 100644 --- a/chain/actors/builtin/miner/v7.go +++ b/chain/actors/builtin/miner/v7.go @@ -62,7 +62,7 @@ func (s *state7) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git 
a/chain/actors/builtin/miner/v8.go b/chain/actors/builtin/miner/v8.go index 3e3739591..06a205e76 100644 --- a/chain/actors/builtin/miner/v8.go +++ b/chain/actors/builtin/miner/v8.go @@ -62,7 +62,7 @@ func (s *state8) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/miner/v9.go b/chain/actors/builtin/miner/v9.go index 72d9dbd59..6cbbd509e 100644 --- a/chain/actors/builtin/miner/v9.go +++ b/chain/actors/builtin/miner/v9.go @@ -62,7 +62,7 @@ func (s *state9) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmoun available = abi.NewTokenAmount(0) } }() - // this panics if the miner doesnt have enough funds to cover their locked pledge + // this panics if the miner doesn't have enough funds to cover their locked pledge available, err = s.GetAvailableBalance(bal) return available, err } diff --git a/chain/actors/builtin/paych/actor.go.template b/chain/actors/builtin/paych/actor.go.template index e19ac5e29..3498a7a49 100644 --- a/chain/actors/builtin/paych/actor.go.template +++ b/chain/actors/builtin/paych/actor.go.template @@ -29,7 +29,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -// Load returns an abstract copy of payment channel state, irregardless of actor version +// Load returns an abstract copy of payment channel state, regardless of actor version func Load(store adt.Store, act *types.Actor) (State, error) { if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { if name != manifest.PaychKey { diff --git a/chain/actors/builtin/paych/paych.go b/chain/actors/builtin/paych/paych.go index 8a7979e95..fc8908cb4 100644 --- a/chain/actors/builtin/paych/paych.go +++ b/chain/actors/builtin/paych/paych.go @@ -29,7 +29,7 @@ 
import ( "github.com/filecoin-project/lotus/chain/types" ) -// Load returns an abstract copy of payment channel state, irregardless of actor version +// Load returns an abstract copy of payment channel state, regardless of actor version func Load(store adt.Store, act *types.Actor) (State, error) { if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { if name != manifest.PaychKey { diff --git a/chain/actors/builtin/verifreg/actor.go.template b/chain/actors/builtin/verifreg/actor.go.template index 9b779a68d..991c6717b 100644 --- a/chain/actors/builtin/verifreg/actor.go.template +++ b/chain/actors/builtin/verifreg/actor.go.template @@ -97,4 +97,6 @@ func AllCodes() []cid.Cid { type Allocation = verifregtypes.Allocation type AllocationId = verifregtypes.AllocationId type Claim = verifregtypes.Claim -type ClaimId = verifregtypes.ClaimId \ No newline at end of file +type ClaimId = verifregtypes.ClaimId + +const NoAllocationID = verifregtypes.NoAllocationID diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go index e79a790c7..de906f521 100644 --- a/chain/actors/builtin/verifreg/verifreg.go +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -168,3 +168,5 @@ type Allocation = verifregtypes.Allocation type AllocationId = verifregtypes.AllocationId type Claim = verifregtypes.Claim type ClaimId = verifregtypes.ClaimId + +const NoAllocationID = verifregtypes.NoAllocationID diff --git a/chain/actors/manifest.go b/chain/actors/manifest.go index f58768ca2..62c17193a 100644 --- a/chain/actors/manifest.go +++ b/chain/actors/manifest.go @@ -51,6 +51,12 @@ func RegisterManifest(av actorstypes.Version, manifestCid cid.Cid, entries map[s } } +func AddActorMeta(name string, codeId cid.Cid, av actorstypes.Version) { + manifestMx.Lock() + defer manifestMx.Unlock() + actorMeta[codeId] = actorEntry{name: name, version: av} +} + // GetManifest gets a loaded manifest. 
func GetManifest(av actorstypes.Version) (cid.Cid, bool) { manifestMx.RLock() diff --git a/chain/actors/params.go b/chain/actors/params.go index f09b0be55..866c72b99 100644 --- a/chain/actors/params.go +++ b/chain/actors/params.go @@ -13,7 +13,7 @@ import ( func SerializeParams(i cbg.CBORMarshaler) ([]byte, aerrors.ActorError) { buf := new(bytes.Buffer) if err := i.MarshalCBOR(buf); err != nil { - // TODO: shouldnt this be a fatal error? + // TODO: shouldn't this be a fatal error? return nil, aerrors.Absorb(err, exitcode.ErrSerialization, "failed to encode parameter") } return buf.Bytes(), nil diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index 577b65947..6d2b41154 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -560,8 +560,53 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { return ChainFinality } -func GetMaxSectorExpirationExtension() abi.ChainEpoch { - return miner12.MaxSectorExpirationExtension +func GetMaxSectorExpirationExtension(nv network.Version) (abi.ChainEpoch, error) { + v, err := actorstypes.VersionForNetwork(nv) + if err != nil { + return 0, xerrors.Errorf("failed to get actors version: %w", err) + } + switch v { + + case actorstypes.Version0: + return miner0.MaxSectorExpirationExtension, nil + + case actorstypes.Version2: + return miner2.MaxSectorExpirationExtension, nil + + case actorstypes.Version3: + return miner3.MaxSectorExpirationExtension, nil + + case actorstypes.Version4: + return miner4.MaxSectorExpirationExtension, nil + + case actorstypes.Version5: + return miner5.MaxSectorExpirationExtension, nil + + case actorstypes.Version6: + return miner6.MaxSectorExpirationExtension, nil + + case actorstypes.Version7: + return miner7.MaxSectorExpirationExtension, nil + + case actorstypes.Version8: + return miner8.MaxSectorExpirationExtension, nil + + case actorstypes.Version9: + return miner9.MaxSectorExpirationExtension, nil + + case actorstypes.Version10: 
+ return miner10.MaxSectorExpirationExtension, nil + + case actorstypes.Version11: + return miner11.MaxSectorExpirationExtension, nil + + case actorstypes.Version12: + return miner12.MaxSectorExpirationExtension, nil + + default: + return 0, xerrors.Errorf("unsupported network version") + } + } func GetMinSectorExpiration() abi.ChainEpoch { @@ -577,7 +622,8 @@ func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, e if err != nil { return 0, err } - return int(uint64(maxSectors) / sectorsPerPart), nil + + return min(miner12.PoStedPartitionsMax, int(uint64(maxSectors)/sectorsPerPart)), nil } func GetDefaultAggregationProof() abi.RegisteredAggregationProof { @@ -838,3 +884,10 @@ func GetSealProofFromPoStProof(postProof abi.RegisteredPoStProof) (abi.Registere } return sealProof, nil } + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/chain/actors/policy/policy.go.template b/chain/actors/policy/policy.go.template index 55a7c844e..d13518e0a 100644 --- a/chain/actors/policy/policy.go.template +++ b/chain/actors/policy/policy.go.template @@ -223,8 +223,20 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { return ChainFinality } -func GetMaxSectorExpirationExtension() abi.ChainEpoch { - return miner{{.latestVersion}}.MaxSectorExpirationExtension +func GetMaxSectorExpirationExtension(nv network.Version) (abi.ChainEpoch, error) { + v, err := actorstypes.VersionForNetwork(nv) + if err != nil { + return 0, xerrors.Errorf("failed to get actors version: %w", err) + } + switch v { + {{range .versions}} + case actorstypes.Version{{.}}: + return miner{{.}}.MaxSectorExpirationExtension, nil + {{end}} + default: + return 0, xerrors.Errorf("unsupported network version") + } + } func GetMinSectorExpiration() abi.ChainEpoch { @@ -240,7 +252,8 @@ func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, e if err != nil { return 0, err } - return int(uint64(maxSectors) / 
sectorsPerPart), nil + + return min(miner{{.latestVersion}}.PoStedPartitionsMax, int(uint64(maxSectors) / sectorsPerPart)), nil } func GetDefaultAggregationProof() abi.RegisteredAggregationProof { @@ -346,3 +359,10 @@ func GetSealProofFromPoStProof(postProof abi.RegisteredPoStProof) (abi.Registere } return sealProof, nil } + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/chain/actors/policy/policy_test.go b/chain/actors/policy/policy_test.go index 726fca95a..5fd4bd7b0 100644 --- a/chain/actors/policy/policy_test.go +++ b/chain/actors/policy/policy_test.go @@ -7,7 +7,6 @@ import ( "github.com/stretchr/testify/require" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/network" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych" @@ -74,13 +73,3 @@ func TestPartitionSizes(t *testing.T) { require.Equal(t, sizeOld, sizeNew) } } - -func TestPoStSize(t *testing.T) { - //stm: @BLOCKCHAIN_POLICY_GET_MAX_POST_PARTITIONS_001 - v12PoStSize, err := GetMaxPoStPartitions(network.Version12, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1) - require.Equal(t, 4, v12PoStSize) - require.NoError(t, err) - v13PoStSize, err := GetMaxPoStPartitions(network.Version13, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1) - require.NoError(t, err) - require.Equal(t, 10, v13PoStSize) -} diff --git a/chain/beacon/drand/drand_test.go b/chain/beacon/drand/drand_test.go index 7269139ca..7434241a5 100644 --- a/chain/beacon/drand/drand_test.go +++ b/chain/beacon/drand/drand_test.go @@ -17,7 +17,7 @@ import ( ) func TestPrintGroupInfo(t *testing.T) { - server := build.DrandConfigs[build.DrandDevnet].Servers[0] + server := build.DrandConfigs[build.DrandTestnet].Servers[0] c, err := hclient.New(server, nil, nil) assert.NoError(t, err) cg := c.(interface { @@ -31,7 +31,7 @@ 
func TestPrintGroupInfo(t *testing.T) { func TestMaxBeaconRoundForEpoch(t *testing.T) { todayTs := uint64(1652222222) - db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[build.DrandDevnet]) + db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[build.DrandTestnet]) assert.NoError(t, err) mbr15 := db.MaxBeaconRoundForEpoch(network.Version15, 100) mbr16 := db.MaxBeaconRoundForEpoch(network.Version16, 100) diff --git a/chain/consensus/compute_state.go b/chain/consensus/compute_state.go index 1edeb60b7..4b993b3e7 100644 --- a/chain/consensus/compute_state.go +++ b/chain/consensus/compute_state.go @@ -147,9 +147,6 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, return xerrors.Errorf("callback failed on cron message: %w", err) } } - if ret.ExitCode != 0 { - return xerrors.Errorf("cron exit was non-zero: %d", ret.ExitCode) - } return nil } diff --git a/chain/consensus/filcns/filecoin.go b/chain/consensus/filcns/filecoin.go index b5ec13a60..bb70d5d11 100644 --- a/chain/consensus/filcns/filecoin.go +++ b/chain/consensus/filcns/filecoin.go @@ -91,9 +91,6 @@ var RewardFunc = func(ctx context.Context, vmi vm.Interface, em stmgr.ExecMonito } } - if ret.ExitCode != 0 { - return xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr) - } return nil } diff --git a/chain/consensus/filcns/upgrades.go b/chain/consensus/filcns/upgrades.go index 16abec6a8..c1f594a92 100644 --- a/chain/consensus/filcns/upgrades.go +++ b/chain/consensus/filcns/upgrades.go @@ -1,6 +1,7 @@ package filcns import ( + "bytes" "context" _ "embed" "fmt" @@ -19,7 +20,9 @@ import ( actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/big" nv18 "github.com/filecoin-project/go-state-types/builtin/v10/migration" + init11 "github.com/filecoin-project/go-state-types/builtin/v11/init" nv19 "github.com/filecoin-project/go-state-types/builtin/v11/migration" + 
system11 "github.com/filecoin-project/go-state-types/builtin/v11/system" nv21 "github.com/filecoin-project/go-state-types/builtin/v12/migration" nv17 "github.com/filecoin-project/go-state-types/builtin/v9/migration" "github.com/filecoin-project/go-state-types/manifest" @@ -273,6 +276,10 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule { StopWithin: 10, }}, Expensive: true, + }, { + Height: build.UpgradeWatermelonFixHeight, + Network: network.Version21, + Migration: upgradeActorsV12Fix, }, } @@ -1869,6 +1876,8 @@ func UpgradeActorsV12(ctx context.Context, sm *stmgr.StateManager, cache stmgr.M return newRoot, nil } +var calibnetv12BuggyBundle = cid.MustParse("bafy2bzacedrunxfqta5skb7q7x32lnp4efz2oq7fn226ffm7fu5iqs62jkmvs") + func upgradeActorsV12Common( ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, @@ -1894,13 +1903,50 @@ func upgradeActorsV12Common( ) } - manifest, ok := actors.GetManifest(actorstypes.Version12) - if !ok { - return cid.Undef, xerrors.Errorf("no manifest CID for v12 upgrade") + // check whether or not this is a calibnet upgrade + // we do this because calibnet upgraded to a "wrong" actors bundle, which was then corrected + // we thus upgrade to calibrationnet-buggy in this upgrade + actorsIn, err := state.LoadStateTree(adtStore, root) + if err != nil { + return cid.Undef, xerrors.Errorf("loading state tree: %w", err) + } + + initActor, err := actorsIn.GetActor(builtin.InitActorAddr) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to get system actor: %w", err) + } + + var initState init11.State + if err := adtStore.Get(ctx, initActor.Head, &initState); err != nil { + return cid.Undef, xerrors.Errorf("failed to get system actor state: %w", err) + } + + var manifestCid cid.Cid + if initState.NetworkName == "calibrationnet" { + embedded, ok := build.GetEmbeddedBuiltinActorsBundle(actorstypes.Version12, "calibrationnet-buggy") + if !ok { + return 
cid.Undef, xerrors.Errorf("didn't find buggy calibrationnet bundle") + } + + var err error + manifestCid, err = bundle.LoadBundle(ctx, writeStore, bytes.NewReader(embedded)) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to load buggy calibnet bundle: %w", err) + } + + if manifestCid != calibnetv12BuggyBundle { + return cid.Undef, xerrors.Errorf("didn't find expected buggy calibnet bundle manifest: %s != %s", manifestCid, calibnetv12BuggyBundle) + } + } else { + ok := false + manifestCid, ok = actors.GetManifest(actorstypes.Version12) + if !ok { + return cid.Undef, xerrors.Errorf("no manifest CID for v12 upgrade") + } } // Perform the migration - newHamtRoot, err := nv21.MigrateStateTree(ctx, adtStore, manifest, stateRoot.Actors, epoch, config, + newHamtRoot, err := nv21.MigrateStateTree(ctx, adtStore, manifestCid, stateRoot.Actors, epoch, config, migrationLogger{}, cache) if err != nil { return cid.Undef, xerrors.Errorf("upgrading to actors v12: %w", err) @@ -1928,6 +1974,139 @@ func upgradeActorsV12Common( return newRoot, nil } +////////////////////// + +var calibnetv12BuggyMinerCID = cid.MustParse("bafk2bzacecnh2ouohmonvebq7uughh4h3ppmg4cjsk74dzxlbbtlcij4xbzxq") + +func upgradeActorsV12Fix(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + stateStore := sm.ChainStore().StateBlockstore() + adtStore := store.ActorStore(ctx, stateStore) + + // ensure that the manifest is loaded in the blockstore + if err := bundle.LoadBundles(ctx, stateStore, actorstypes.Version12); err != nil { + return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err) + } + + // Load input state tree + actorsIn, err := state.LoadStateTree(adtStore, root) + if err != nil { + return cid.Undef, xerrors.Errorf("loading state tree: %w", err) + } + + // load old manifest data + systemActor, err := actorsIn.GetActor(builtin.SystemActorAddr) + if err != 
nil { + return cid.Undef, xerrors.Errorf("failed to get system actor: %w", err) + } + + var systemState system11.State + if err := adtStore.Get(ctx, systemActor.Head, &systemState); err != nil { + return cid.Undef, xerrors.Errorf("failed to get system actor state: %w", err) + } + + var oldManifestData manifest.ManifestData + if err := adtStore.Get(ctx, systemState.BuiltinActors, &oldManifestData); err != nil { + return cid.Undef, xerrors.Errorf("failed to get old manifest data: %w", err) + } + + newManifestCID, ok := actors.GetManifest(actorstypes.Version12) + if !ok { + return cid.Undef, xerrors.Errorf("no manifest CID for v12 upgrade") + } + + // load new manifest + var newManifest manifest.Manifest + if err := adtStore.Get(ctx, newManifestCID, &newManifest); err != nil { + return cid.Undef, xerrors.Errorf("error reading actor manifest: %w", err) + } + + if err := newManifest.Load(ctx, adtStore); err != nil { + return cid.Undef, xerrors.Errorf("error loading actor manifest: %w", err) + } + + // build the CID mapping + codeMapping := make(map[cid.Cid]cid.Cid, len(oldManifestData.Entries)) + for _, oldEntry := range oldManifestData.Entries { + newCID, ok := newManifest.Get(oldEntry.Name) + if !ok { + return cid.Undef, xerrors.Errorf("missing manifest entry for %s", oldEntry.Name) + } + + // Note: we expect newCID to be the same as oldEntry.Code for all actors except the miner actor + codeMapping[oldEntry.Code] = newCID + } + + // Create empty actorsOut + + actorsOut, err := state.NewStateTree(adtStore, actorsIn.Version()) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to create new tree: %w", err) + } + + // Perform the migration + err = actorsIn.ForEach(func(a address.Address, actor *types.Actor) error { + newCid, ok := codeMapping[actor.Code] + if !ok { + return xerrors.Errorf("didn't find mapping for %s", actor.Code) + } + + return actorsOut.SetActor(a, &types.ActorV5{ + Code: newCid, + Head: actor.Head, + Nonce: actor.Nonce, + Balance: 
actor.Balance, + Address: actor.Address, + }) + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to perform migration: %w", err) + } + + err = actorsIn.ForEach(func(a address.Address, inActor *types.Actor) error { + outActor, err := actorsOut.GetActor(a) + if err != nil { + return xerrors.Errorf("failed to get actor in outTree: %w", err) + } + + if inActor.Nonce != outActor.Nonce { + return xerrors.Errorf("mismatched nonce for actor %s", a) + } + + if !inActor.Balance.Equals(outActor.Balance) { + return xerrors.Errorf("mismatched balance for actor %s: %d != %d", a, inActor.Balance, outActor.Balance) + } + + if inActor.Address != outActor.Address && inActor.Address.String() != outActor.Address.String() { + return xerrors.Errorf("mismatched address for actor %s: %s != %s", a, inActor.Address, outActor.Address) + } + + if inActor.Head != outActor.Head { + return xerrors.Errorf("mismatched head for actor %s", a) + } + + // This is the hard-coded "buggy" miner actor Code ID + if inActor.Code != calibnetv12BuggyMinerCID && inActor.Code != outActor.Code { + return xerrors.Errorf("unexpected change in code for actor %s", a) + } + + return nil + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to sanity check migration: %w", err) + } + + // Persist the result. 
+ newRoot, err := actorsOut.Flush(ctx) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + return newRoot, nil +} + +//////////////////// + // Example upgrade function if upgrade requires only code changes //func UpgradeActorsV9(ctx context.Context, sm *stmgr.StateManager, _ stmgr.MigrationCache, _ stmgr.ExecMonitor, root cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) { // buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync()) diff --git a/chain/events/events_height.go b/chain/events/events_height.go index 457933fc6..5789be753 100644 --- a/chain/events/events_height.go +++ b/chain/events/events_height.go @@ -180,7 +180,7 @@ func (e *heightEventsObserver) Revert(ctx context.Context, from, to *types.TipSe // Update the head first so we don't accidental skip reverting a concurrent call to ChainAt. e.updateHead(to) - // Call revert on all hights between the two tipsets, handling empty tipsets. + // Call revert on all heights between the two tipsets, handling empty tipsets. 
for h := from.Height(); h > to.Height(); h-- { e.lk.Lock() triggers := e.tsHeights[h] diff --git a/chain/events/events_test.go b/chain/events/events_test.go index e2450909c..f16434355 100644 --- a/chain/events/events_test.go +++ b/chain/events/events_test.go @@ -673,7 +673,7 @@ func TestCalled(t *testing.T) { }, 3, 20, matchAddrMethod(t0123, 5)) require.NoError(t, err) - // create few blocks to make sure nothing get's randomly called + // create few blocks to make sure nothing gets randomly called fcs.advance(0, 4, 0, nil) // H=5 require.Equal(t, false, applied) @@ -991,7 +991,7 @@ func TestCalledNull(t *testing.T) { }, 3, 20, matchAddrMethod(t0123, 5)) require.NoError(t, err) - // create few blocks to make sure nothing get's randomly called + // create few blocks to make sure nothing gets randomly called fcs.advance(0, 4, 0, nil) // H=5 require.Equal(t, false, applied) @@ -1050,7 +1050,7 @@ func TestRemoveTriggersOnMessage(t *testing.T) { }, 3, 20, matchAddrMethod(t0123, 5)) require.NoError(t, err) - // create few blocks to make sure nothing get's randomly called + // create few blocks to make sure nothing gets randomly called fcs.advance(0, 4, 0, nil) // H=5 require.Equal(t, false, applied) @@ -1155,7 +1155,7 @@ func TestStateChanged(t *testing.T) { }) require.NoError(t, err) - // create few blocks to make sure nothing get's randomly called + // create few blocks to make sure nothing gets randomly called fcs.advance(0, 4, 0, nil) // H=5 require.Equal(t, false, applied) diff --git a/chain/events/filter/event.go b/chain/events/filter/event.go index b821a2f83..24192a53e 100644 --- a/chain/events/filter/event.go +++ b/chain/events/filter/event.go @@ -388,7 +388,7 @@ func (m *EventFilterManager) Install(ctx context.Context, minHeight, maxHeight a if m.EventIndex != nil && minHeight != -1 && minHeight < currentHeight { // Filter needs historic events - if err := m.EventIndex.PrefillFilter(ctx, f); err != nil { + if err := m.EventIndex.PrefillFilter(ctx, f, true); err 
!= nil { return nil, err } } diff --git a/chain/events/filter/index.go b/chain/events/filter/index.go index bacba60d7..2b1890c73 100644 --- a/chain/events/filter/index.go +++ b/chain/events/filter/index.go @@ -481,7 +481,7 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever } // PrefillFilter fills a filter's collection of events from the historic index -func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter) error { +func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter, excludeReverted bool) error { clauses := []string{} values := []any{} joins := []string{} @@ -500,6 +500,11 @@ func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter) error { } } + if excludeReverted { + clauses = append(clauses, "event.reverted=?") + values = append(values, false) + } + if len(f.addresses) > 0 { subclauses := []string{} for _, addr := range f.addresses { diff --git a/chain/events/filter/index_test.go b/chain/events/filter/index_test.go index fcdb1ab05..f9b1b14ad 100644 --- a/chain/events/filter/index_test.go +++ b/chain/events/filter/index_test.go @@ -272,7 +272,7 @@ func TestEventIndexPrefillFilter(t *testing.T) { for _, tc := range testCases { tc := tc // appease lint t.Run(tc.name, func(t *testing.T) { - if err := ei.PrefillFilter(context.Background(), tc.filter); err != nil { + if err := ei.PrefillFilter(context.Background(), tc.filter, false); err != nil { require.NoError(t, err, "prefill filter events") } @@ -281,3 +281,619 @@ func TestEventIndexPrefillFilter(t *testing.T) { }) } } + +func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { + rng := pseudo.New(pseudo.NewSource(299792458)) + a1 := randomF4Addr(t, rng) + a2 := randomF4Addr(t, rng) + a3 := randomF4Addr(t, rng) + + a1ID := abi.ActorID(1) + a2ID := abi.ActorID(2) + + addrMap := addressMap{} + addrMap.add(a1ID, a1) + addrMap.add(a2ID, a2) + + ev1 := fakeEvent( + a1ID, + []kv{ + {k: "type", v: []byte("approval")}, + {k: 
"signer", v: []byte("addr1")}, + }, + []kv{ + {k: "amount", v: []byte("2988181")}, + }, + ) + ev2 := fakeEvent( + a2ID, + []kv{ + {k: "type", v: []byte("approval")}, + {k: "signer", v: []byte("addr2")}, + }, + []kv{ + {k: "amount", v: []byte("2988182")}, + }, + ) + + st := newStore() + events := []*types.Event{ev1} + revertedEvents := []*types.Event{ev2} + em := executedMessage{ + msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)), + rct: fakeReceipt(t, rng, st, events), + evs: events, + } + revertedEm := executedMessage{ + msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)), + rct: fakeReceipt(t, rng, st, revertedEvents), + evs: revertedEvents, + } + + events14000 := buildTipSetEvents(t, rng, 14000, em) + revertedEvents14000 := buildTipSetEvents(t, rng, 14000, revertedEm) + cid14000, err := events14000.msgTs.Key().Cid() + require.NoError(t, err, "tipset cid") + reveredCID14000, err := revertedEvents14000.msgTs.Key().Cid() + require.NoError(t, err, "tipset cid") + + noCollectedEvents := []*CollectedEvent{} + oneCollectedEvent := []*CollectedEvent{ + { + Entries: ev1.Entries, + EmitterAddr: a1, + EventIdx: 0, + Reverted: false, + Height: 14000, + TipSetKey: events14000.msgTs.Key(), + MsgIdx: 0, + MsgCid: em.msg.Cid(), + }, + } + twoCollectedEvent := []*CollectedEvent{ + { + Entries: ev1.Entries, + EmitterAddr: a1, + EventIdx: 0, + Reverted: false, + Height: 14000, + TipSetKey: events14000.msgTs.Key(), + MsgIdx: 0, + MsgCid: em.msg.Cid(), + }, + { + Entries: ev2.Entries, + EmitterAddr: a2, + EventIdx: 0, + Reverted: true, + Height: 14000, + TipSetKey: revertedEvents14000.msgTs.Key(), + MsgIdx: 0, + MsgCid: revertedEm.msg.Cid(), + }, + } + oneCollectedRevertedEvent := []*CollectedEvent{ + { + Entries: ev2.Entries, + EmitterAddr: a2, + EventIdx: 0, + Reverted: true, + Height: 14000, + TipSetKey: revertedEvents14000.msgTs.Key(), + MsgIdx: 0, + MsgCid: revertedEm.msg.Cid(), + }, + } + + workDir, err := os.MkdirTemp("", "lotusevents") + 
require.NoError(t, err, "create temporary work directory") + + defer func() { + _ = os.RemoveAll(workDir) + }() + t.Logf("using work dir %q", workDir) + + dbPath := filepath.Join(workDir, "actorevents.db") + + ei, err := NewEventIndex(context.Background(), dbPath, nil) + require.NoError(t, err, "create event index") + if err := ei.CollectEvents(context.Background(), revertedEvents14000, false, addrMap.ResolveAddress); err != nil { + require.NoError(t, err, "collect reverted events") + } + if err := ei.CollectEvents(context.Background(), revertedEvents14000, true, addrMap.ResolveAddress); err != nil { + require.NoError(t, err, "revert reverted events") + } + if err := ei.CollectEvents(context.Background(), events14000, false, addrMap.ResolveAddress); err != nil { + require.NoError(t, err, "collect events") + } + + inclusiveTestCases := []struct { + name string + filter *EventFilter + te *TipSetEvents + want []*CollectedEvent + }{ + { + name: "nomatch tipset min height", + filter: &EventFilter{ + minHeight: 14001, + maxHeight: -1, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch tipset max height", + filter: &EventFilter{ + minHeight: -1, + maxHeight: 13999, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "match tipset min height", + filter: &EventFilter{ + minHeight: 14000, + maxHeight: -1, + }, + te: events14000, + want: twoCollectedEvent, + }, + { + name: "match tipset cid", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + tipsetCid: cid14000, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "match tipset cid", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + tipsetCid: reveredCID14000, + }, + te: revertedEvents14000, + want: oneCollectedRevertedEvent, + }, + { + name: "nomatch address", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + addresses: []address.Address{a3}, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "match address 2", + filter: 
&EventFilter{ + minHeight: -1, + maxHeight: -1, + addresses: []address.Address{a2}, + }, + te: revertedEvents14000, + want: oneCollectedRevertedEvent, + }, + { + name: "match address 1", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + addresses: []address.Address{a1}, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "match one entry", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + }, + }, + te: events14000, + want: twoCollectedEvent, + }, + { + name: "match one entry with alternate values", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("cancel"), + []byte("propose"), + []byte("approval"), + }, + }, + }, + te: events14000, + want: twoCollectedEvent, + }, + { + name: "nomatch one entry by missing value", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("cancel"), + []byte("propose"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry by missing key", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "method": { + []byte("approval"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "match one entry with multiple keys", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "signer": { + []byte("addr1"), + }, + }, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "match one entry with multiple keys", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "signer": { + []byte("addr2"), + }, + }, + }, + te: revertedEvents14000, + want: oneCollectedRevertedEvent, + }, + { + name: "nomatch one entry with one mismatching key", + filter: &EventFilter{ + minHeight: -1, + 
maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "approver": { + []byte("addr1"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry with one mismatching value", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "signer": { + []byte("addr3"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry with one unindexed key", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "amount": { + []byte("2988181"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry with one unindexed key", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "amount": { + []byte("2988182"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + } + + exclusiveTestCases := []struct { + name string + filter *EventFilter + te *TipSetEvents + want []*CollectedEvent + }{ + { + name: "nomatch tipset min height", + filter: &EventFilter{ + minHeight: 14001, + maxHeight: -1, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch tipset max height", + filter: &EventFilter{ + minHeight: -1, + maxHeight: 13999, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "match tipset min height", + filter: &EventFilter{ + minHeight: 14000, + maxHeight: -1, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "match tipset cid", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + tipsetCid: cid14000, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "match tipset cid but reverted", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + tipsetCid: reveredCID14000, + }, + te: revertedEvents14000, + want: noCollectedEvents, + }, + { + name: "nomatch address", + filter: &EventFilter{ + minHeight: -1, + 
maxHeight: -1, + addresses: []address.Address{a3}, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch address 2 but reverted", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + addresses: []address.Address{a2}, + }, + te: revertedEvents14000, + want: noCollectedEvents, + }, + { + name: "match address", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + addresses: []address.Address{a1}, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "match one entry", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + }, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "match one entry with alternate values", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("cancel"), + []byte("propose"), + []byte("approval"), + }, + }, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "nomatch one entry by missing value", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("cancel"), + []byte("propose"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry by missing key", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "method": { + []byte("approval"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "match one entry with multiple keys", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "signer": { + []byte("addr1"), + }, + }, + }, + te: events14000, + want: oneCollectedEvent, + }, + { + name: "nomatch one entry with one mismatching key", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "approver": { + []byte("addr1"), + }, + }, + }, + te: 
events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry with matching reverted value", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "signer": { + []byte("addr2"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry with one mismatching value", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "type": { + []byte("approval"), + }, + "signer": { + []byte("addr3"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + { + name: "nomatch one entry with one unindexed key", + filter: &EventFilter{ + minHeight: -1, + maxHeight: -1, + keys: map[string][][]byte{ + "amount": { + []byte("2988181"), + }, + }, + }, + te: events14000, + want: noCollectedEvents, + }, + } + + for _, tc := range inclusiveTestCases { + tc := tc // appease lint + t.Run(tc.name, func(t *testing.T) { + if err := ei.PrefillFilter(context.Background(), tc.filter, false); err != nil { + require.NoError(t, err, "prefill filter events") + } + + coll := tc.filter.TakeCollectedEvents(context.Background()) + require.ElementsMatch(t, coll, tc.want, tc.name) + }) + } + + for _, tc := range exclusiveTestCases { + tc := tc // appease lint + t.Run(tc.name, func(t *testing.T) { + if err := ei.PrefillFilter(context.Background(), tc.filter, true); err != nil { + require.NoError(t, err, "prefill filter events") + } + + coll := tc.filter.TakeCollectedEvents(context.Background()) + require.ElementsMatch(t, coll, tc.want, tc.name) + }) + } +} diff --git a/chain/exchange/cbor_gen.go b/chain/exchange/cbor_gen.go index d1eb271e9..e66b6d798 100644 --- a/chain/exchange/cbor_gen.go +++ b/chain/exchange/cbor_gen.go @@ -43,9 +43,11 @@ func (t *Request) MarshalCBOR(w io.Writer) error { return err } for _, v := range t.Head { - if err := cbg.WriteCid(w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.Head: %w", 
err) + + if err := cbg.WriteCid(cw, v); err != nil { + return xerrors.Errorf("failed to write cid field v: %w", err) } + } // t.Length (uint64) (uint64) @@ -106,12 +108,25 @@ func (t *Request) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("reading cid field t.Head failed: %w", err) + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Head[i]: %w", err) + } + + t.Head[i] = c + + } } - t.Head[i] = c } // t.Length (uint64) (uint64) @@ -173,7 +188,7 @@ func (t *Response) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ErrorMessage))); err != nil { return err } - if _, err := io.WriteString(w, string(t.ErrorMessage)); err != nil { + if _, err := cw.WriteString(string(t.ErrorMessage)); err != nil { return err } @@ -260,13 +275,32 @@ func (t *Response) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v BSTipSet - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Chain[i] = new(BSTipSet) + if err := t.Chain[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Chain[i] pointer: %w", err) + } + } + + } } - - t.Chain[i] = &v } return nil @@ -317,9 +351,11 @@ func (t *CompactedMessages) MarshalCBOR(w io.Writer) error { return err } for _, v := range v { - if err := cw.CborWriteHeader(cbg.MajUnsignedInt, uint64(v)); err != nil { + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(v)); err != nil { return err } + } } @@ -354,9 +390,11 @@ func (t *CompactedMessages) MarshalCBOR(w 
io.Writer) error { return err } for _, v := range v { - if err := cw.CborWriteHeader(cbg.MajUnsignedInt, uint64(v)); err != nil { + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(v)); err != nil { return err } + } } return nil @@ -405,13 +443,32 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v types.Message - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Bls[i] = new(types.Message) + if err := t.Bls[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Bls[i] pointer: %w", err) + } + } + + } } - - t.Bls[i] = &v } // t.BlsIncludes ([][]uint64) (slice) @@ -438,6 +495,9 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) { var maj byte var extra uint64 var err error + _ = maj + _ = extra + _ = err maj, extra, err = cr.ReadHeader() if err != nil { @@ -457,17 +517,27 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) { } for j := 0; j < int(extra); j++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - maj, val, err := cr.ReadHeader() - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.BlsIncludes[i] slice: %w", err) + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.BlsIncludes[i][j] = uint64(extra) + + } } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.BlsIncludes[i] was not a uint, instead got %d", maj) - } - - t.BlsIncludes[i][j] = uint64(val) } } @@ -493,13 +563,32 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ 
{ + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v types.SignedMessage - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Secpk[i] = new(types.SignedMessage) + if err := t.Secpk[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Secpk[i] pointer: %w", err) + } + } + + } } - - t.Secpk[i] = &v } // t.SecpkIncludes ([][]uint64) (slice) @@ -526,6 +615,9 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) { var maj byte var extra uint64 var err error + _ = maj + _ = extra + _ = err maj, extra, err = cr.ReadHeader() if err != nil { @@ -545,17 +637,27 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) (err error) { } for j := 0; j < int(extra); j++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - maj, val, err := cr.ReadHeader() - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.SecpkIncludes[i] slice: %w", err) + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SecpkIncludes[i][j] = uint64(extra) + + } } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.SecpkIncludes[i] was not a uint, instead got %d", maj) - } - - t.SecpkIncludes[i][j] = uint64(val) } } @@ -642,13 +744,32 @@ func (t *BSTipSet) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v types.BlockHeader - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Blocks[i] = 
new(types.BlockHeader) + if err := t.Blocks[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Blocks[i] pointer: %w", err) + } + } + + } } - - t.Blocks[i] = &v } // t.Messages (exchange.CompactedMessages) (struct) diff --git a/chain/exchange/client.go b/chain/exchange/client.go index db39628be..120b554a1 100644 --- a/chain/exchange/client.go +++ b/chain/exchange/client.go @@ -242,7 +242,7 @@ func (c *client) processResponse(req *Request, res *Response, tipsets []*types.T // If we didn't request the headers they should have been provided // by the caller. if len(tipsets) < len(res.Chain) { - return nil, xerrors.Errorf("not enought tipsets provided for message response validation, needed %d, have %d", len(res.Chain), len(tipsets)) + return nil, xerrors.Errorf("not enough tipsets provided for message response validation, needed %d, have %d", len(res.Chain), len(tipsets)) } chain := make([]*BSTipSet, 0, resLength) for i, resChain := range res.Chain { @@ -381,7 +381,7 @@ func (c *client) sendRequestToPeer(ctx context.Context, peer peer.ID, req *Reque defer span.End() if span.IsRecordingEvents() { span.AddAttributes( - trace.StringAttribute("peer", peer.Pretty()), + trace.StringAttribute("peer", peer.String()), ) } defer func() { diff --git a/chain/gen/genesis/genesis.go b/chain/gen/genesis/genesis.go index 3e8848021..8ec657479 100644 --- a/chain/gen/genesis/genesis.go +++ b/chain/gen/genesis/genesis.go @@ -578,7 +578,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto } // setup Storage Miners - stateroot, err = SetupStorageMiners(ctx, cs, sys, stateroot, template.Miners, template.NetworkVersion) + stateroot, err = SetupStorageMiners(ctx, cs, sys, stateroot, template.Miners, template.NetworkVersion, false) if err != nil { return nil, xerrors.Errorf("setup miners failed: %w", err) } diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index 0880f12aa..df8900cab 100644 --- 
a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -79,7 +79,7 @@ func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder { } // Note: Much of this is brittle, if the methodNum / param / return changes, it will break things -func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.SyscallBuilder, sroot cid.Cid, miners []genesis.Miner, nv network.Version) (cid.Cid, error) { +func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.SyscallBuilder, sroot cid.Cid, miners []genesis.Miner, nv network.Version, synthetic bool) (cid.Cid, error) { cst := cbor.NewCborStore(cs.StateBlockstore()) av, err := actorstypes.VersionForNetwork(nv) @@ -125,14 +125,18 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal sectorWeight []abi.StoragePower }, len(miners)) - maxPeriods := policy.GetMaxSectorExpirationExtension() / minertypes.WPoStProvingPeriod + maxLifetime, err := policy.GetMaxSectorExpirationExtension(nv) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to get max extension: %w", err) + } + maxPeriods := maxLifetime / minertypes.WPoStProvingPeriod rawPow, qaPow := big.NewInt(0), big.NewInt(0) for i, m := range miners { // Create miner through power actor i := i m := m - spt, err := miner.SealProofTypeFromSectorSize(m.SectorSize, nv) + spt, err := miner.SealProofTypeFromSectorSize(m.SectorSize, nv, synthetic) if err != nil { return cid.Undef, err } diff --git a/chain/index/msgindex.go b/chain/index/msgindex.go index 27eeea73e..e9e81ae2c 100644 --- a/chain/index/msgindex.go +++ b/chain/index/msgindex.go @@ -131,7 +131,7 @@ func NewMsgIndex(lctx context.Context, basePath string, cs ChainStore) (MsgIndex db, err := sql.Open("sqlite3", dbPath) if err != nil { - // TODO [nice to have]: automaticaly delete corrupt databases + // TODO [nice to have]: automatically delete corrupt databases // but for now we can just error and let the operator delete. 
return nil, xerrors.Errorf("error opening msgindex database: %w", err) } diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 6dc3f2239..7d55b0b16 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -21,6 +21,7 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/minio/blake2b-simd" "github.com/raulk/clock" + "go.opencensus.io/stats" "golang.org/x/xerrors" ffi "github.com/filecoin-project/filecoin-ffi" @@ -210,8 +211,10 @@ func ComputeRBF(curPrem abi.TokenAmount, replaceByFeeRatio types.Percent) abi.To func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSpec *api.MessageSendSpec) { var maxFee abi.TokenAmount + var maximizeFeeCap bool if sendSpec != nil { maxFee = sendSpec.MaxFee + maximizeFeeCap = sendSpec.MaximizeFeeCap } if maxFee.Int == nil || maxFee.Equals(big.Zero()) { mf, err := mff() @@ -222,15 +225,12 @@ func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSpec *api.M maxFee = mf } - gl := types.NewInt(uint64(msg.GasLimit)) - totalFee := types.BigMul(msg.GasFeeCap, gl) - - if totalFee.LessThanEqual(maxFee) { - msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap - return + gaslimit := types.NewInt(uint64(msg.GasLimit)) + totalFee := types.BigMul(msg.GasFeeCap, gaslimit) + if maximizeFeeCap || totalFee.GreaterThan(maxFee) { + msg.GasFeeCap = big.Div(maxFee, gaslimit) } - msg.GasFeeCap = big.Div(maxFee, gl) msg.GasPremium = big.Min(msg.GasFeeCap, msg.GasPremium) // cap premium at FeeCap } @@ -1022,6 +1022,9 @@ func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, st } }) + // Record the current size of the Mpool + stats.Record(ctx, metrics.MpoolMessageCount.M(int64(mp.currentSize))) + return nil } @@ -1214,6 +1217,9 @@ func (mp *MessagePool) remove(ctx context.Context, from address.Address, nonce u return } } + + // Record the current size of the Mpool + stats.Record(ctx, 
metrics.MpoolMessageCount.M(int64(mp.currentSize))) } func (mp *MessagePool) Pending(ctx context.Context) ([]*types.SignedMessage, *types.TipSet) { diff --git a/chain/state/statetree.go b/chain/state/statetree.go index b527fc1ac..61d7d500a 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -571,7 +571,7 @@ func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error } // no need to record anything here, there are no duplicates in the actors HAMT - // iself. + // itself. if _, ok := seen[addr]; ok { return nil } @@ -589,7 +589,7 @@ func (st *StateTree) ForEach(f func(address.Address, *types.Actor) error) error } // no need to record anything here, there are no duplicates in the actors HAMT - // iself. + // itself. if _, ok := seen[addr]; ok { return nil } diff --git a/chain/store/messages.go b/chain/store/messages.go index 3686f74f4..c23f900d7 100644 --- a/chain/store/messages.go +++ b/chain/store/messages.go @@ -212,13 +212,8 @@ func (cs *ChainStore) MessagesForTipset(ctx context.Context, ts *types.TipSet) ( var out []types.ChainMsg for _, bm := range bmsgs { - for _, blsm := range bm.BlsMessages { - out = append(out, blsm) - } - - for _, secm := range bm.SecpkMessages { - out = append(out, secm) - } + out = append(out, bm.BlsMessages...) + out = append(out, bm.SecpkMessages...) } return out, nil diff --git a/chain/store/snapshot.go b/chain/store/snapshot.go index 5e218fa36..301a5f87b 100644 --- a/chain/store/snapshot.go +++ b/chain/store/snapshot.go @@ -167,7 +167,7 @@ func (t walkSchedTaskType) String() string { case dagTask: return "dag" } - panic(fmt.Sprintf("unknow task %d", t)) + panic(fmt.Sprintf("unknown task %d", t)) } type walkTask struct { @@ -656,9 +656,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe } if b.Height > 0 { - for _, p := range b.Parents { - blocksToWalk = append(blocksToWalk, p) - } + blocksToWalk = append(blocksToWalk, b.Parents...) 
} else { // include the genesis block cids = append(cids, b.Parents...) diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go index 3a11f7c98..b50ddc467 100644 --- a/chain/sub/incoming.go +++ b/chain/sub/incoming.go @@ -516,7 +516,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg return pubsub.ValidationReject } if len(idxrMsg.ExtraData) == 0 { - log.Debugw("ignoring messsage missing miner id", "peer", originPeer) + log.Debugw("ignoring message missing miner id", "peer", originPeer) return pubsub.ValidationIgnore } @@ -552,7 +552,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg // Check that the miner ID maps to the peer that sent the message. err = v.authenticateMessage(ctx, minerAddr, originPeer) if err != nil { - log.Warnw("cannot authenticate messsage", "err", err, "peer", originPeer, "minerID", minerAddr) + log.Warnw("cannot authenticate message", "err", err, "peer", originPeer, "minerID", minerAddr) stats.Record(ctx, metrics.IndexerMessageValidationFailure.M(1)) return pubsub.ValidationReject } diff --git a/chain/sync.go b/chain/sync.go index 6341deeeb..1b9a302f7 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -844,7 +844,7 @@ loop: return nil, xerrors.Errorf("failed to load next local tipset: %w", err) } if base.IsChildOf(knownParent) { - // common case: receiving a block thats potentially part of the same tipset as our best block + // common case: receiving a block that's potentially part of the same tipset as our best block return blockSet, nil } @@ -1094,8 +1094,8 @@ func (syncer *Syncer) fetchMessages(ctx context.Context, headers []*types.TipSet requestErr = multierror.Append(requestErr, err) } else { isGood := true - for index, ts := range headers[nextI:lastI] { - cm := result[index] + for index, cm := range result { + ts := headers[nextI+index] if err := checkMsgMeta(ts, cm.Bls, cm.Secpk, cm.BlsIncludes, cm.SecpkIncludes); err != nil { log.Errorf("fetched messages not as 
expected: %s", err) isGood = false diff --git a/chain/types/cbor_gen.go b/chain/types/cbor_gen.go index a9040613f..fe8e7e3fe 100644 --- a/chain/types/cbor_gen.go +++ b/chain/types/cbor_gen.go @@ -91,9 +91,11 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error { return err } for _, v := range t.Parents { - if err := cbg.WriteCid(w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.Parents: %w", err) + + if err := cbg.WriteCid(cw, v); err != nil { + return xerrors.Errorf("failed to write cid field v: %w", err) } + } // t.ParentWeight (big.Int) (struct) @@ -249,13 +251,22 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v BeaconEntry - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + if err := t.BeaconEntries[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.BeaconEntries[i]: %w", err) + } + + } } - - t.BeaconEntries[i] = v } // t.WinPoStProof ([]proof.PoStProof) (slice) @@ -278,13 +289,22 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v proof.PoStProof - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + if err := t.WinPoStProof[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.WinPoStProof[i]: %w", err) + } + + } } - - t.WinPoStProof[i] = v } // t.Parents ([]cid.Cid) (slice) @@ -307,12 +327,25 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("reading cid field t.Parents failed: %w", err) + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to 
read cid field t.Parents[i]: %w", err) + } + + t.Parents[i] = c + + } } - t.Parents[i] = c } // t.ParentWeight (big.Int) (struct) @@ -1318,9 +1351,11 @@ func (t *BlockMsg) MarshalCBOR(w io.Writer) error { return err } for _, v := range t.BlsMessages { - if err := cbg.WriteCid(w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.BlsMessages: %w", err) + + if err := cbg.WriteCid(cw, v); err != nil { + return xerrors.Errorf("failed to write cid field v: %w", err) } + } // t.SecpkMessages ([]cid.Cid) (slice) @@ -1332,9 +1367,11 @@ func (t *BlockMsg) MarshalCBOR(w io.Writer) error { return err } for _, v := range t.SecpkMessages { - if err := cbg.WriteCid(w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.SecpkMessages: %w", err) + + if err := cbg.WriteCid(cw, v); err != nil { + return xerrors.Errorf("failed to write cid field v: %w", err) } + } return nil } @@ -1401,12 +1438,25 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("reading cid field t.BlsMessages failed: %w", err) + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.BlsMessages[i]: %w", err) + } + + t.BlsMessages[i] = c + + } } - t.BlsMessages[i] = c } // t.SecpkMessages ([]cid.Cid) (slice) @@ -1429,12 +1479,25 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("reading cid field t.SecpkMessages failed: %w", err) + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.SecpkMessages[i]: %w", err) + } + + t.SecpkMessages[i] = c + + } } - t.SecpkMessages[i] = c } return 
nil @@ -1463,9 +1526,11 @@ func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { return err } for _, v := range t.Cids { - if err := cbg.WriteCid(w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.Cids: %w", err) + + if err := cbg.WriteCid(cw, v); err != nil { + return xerrors.Errorf("failed to write cid field v: %w", err) } + } // t.Blocks ([]*types.BlockHeader) (slice) @@ -1538,12 +1603,25 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("reading cid field t.Cids failed: %w", err) + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Cids[i]: %w", err) + } + + t.Cids[i] = c + + } } - t.Cids[i] = c } // t.Blocks ([]*types.BlockHeader) (slice) @@ -1566,13 +1644,32 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v BlockHeader - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Blocks[i] = new(BlockHeader) + if err := t.Blocks[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Blocks[i] pointer: %w", err) + } + } + + } } - - t.Blocks[i] = &v } // t.Height (abi.ChainEpoch) (int64) @@ -1933,13 +2030,22 @@ func (t *Event) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v EventEntry - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + if err := t.Entries[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Entries[i]: 
%w", err) + } + + } } - - t.Entries[i] = v } return nil @@ -1972,7 +2078,7 @@ func (t *EventEntry) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Key))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Key)); err != nil { + if _, err := cw.WriteString(string(t.Key)); err != nil { return err } @@ -2103,7 +2209,7 @@ func (t *GasTrace) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Name)); err != nil { + if _, err := cw.WriteString(string(t.Name)); err != nil { return err } @@ -2756,13 +2862,32 @@ func (t *ExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v GasTrace - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.GasCharges[i] = new(GasTrace) + if err := t.GasCharges[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.GasCharges[i] pointer: %w", err) + } + } + + } } - - t.GasCharges[i] = &v } // t.Subcalls ([]types.ExecutionTrace) (slice) @@ -2785,13 +2910,22 @@ func (t *ExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v ExecutionTrace - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + if err := t.Subcalls[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Subcalls[i]: %w", err) + } + + } } - - t.Subcalls[i] = v } return nil diff --git a/chain/types/message_receipt_cbor.go b/chain/types/message_receipt_cbor.go index e1364e654..955ca4d85 100644 --- 
a/chain/types/message_receipt_cbor.go +++ b/chain/types/message_receipt_cbor.go @@ -140,7 +140,7 @@ func (t *messageReceiptV0) UnmarshalCBOR(r io.Reader) (err error) { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: @@ -186,7 +186,7 @@ func (t *messageReceiptV0) UnmarshalCBOR(r io.Reader) (err error) { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: @@ -278,7 +278,7 @@ func (t *messageReceiptV1) UnmarshalCBOR(r io.Reader) (err error) { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: @@ -324,7 +324,7 @@ func (t *messageReceiptV1) UnmarshalCBOR(r io.Reader) (err error) { case cbg.MajNegativeInt: extraI = int64(extra) if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") + return fmt.Errorf("int64 negative overflow") } extraI = -1 - extraI default: diff --git a/cli/net.go b/cli/net.go index 516b44b7f..f25799e95 100644 --- a/cli/net.go +++ b/cli/net.go @@ -282,7 +282,7 @@ var NetDisconnect = &cli.Command{ fmt.Println("failure") return err } - fmt.Printf("disconnect %s: ", pid.Pretty()) + fmt.Printf("disconnect %s: ", pid) err = api.NetDisconnect(ctx, pid) if err != nil { fmt.Println("failure") @@ -312,7 +312,7 @@ var NetConnect = &cli.Command{ } for _, pi := range pis { - fmt.Printf("connect %s: ", pi.ID.Pretty()) + fmt.Printf("connect %s: ", pi.ID) err := api.NetConnect(ctx, pi) if err != nil { fmt.Println("failure") diff --git a/cli/state.go b/cli/state.go index 667f6fb19..31666a21c 100644 --- a/cli/state.go +++ b/cli/state.go @@ -1920,8 +1920,29 @@ var StateSysActorCIDsCmd = &cli.Command{ if err != nil { return err } - for name, cid := range 
actorsCids { - _, _ = fmt.Fprintf(tw, "%v\t%v\n", name, cid) + + var actorsCidTuples []struct { + actorName string + actorCid cid.Cid + } + + for name, actorCid := range actorsCids { + keyVal := struct { + actorName string + actorCid cid.Cid + }{ + actorName: name, + actorCid: actorCid, + } + actorsCidTuples = append(actorsCidTuples, keyVal) + } + + sort.Slice(actorsCidTuples, func(i, j int) bool { + return actorsCidTuples[i].actorName < actorsCidTuples[j].actorName + }) + + for _, keyVal := range actorsCidTuples { + _, _ = fmt.Fprintf(tw, "%v\t%v\n", keyVal.actorName, keyVal.actorCid) } return tw.Flush() }, diff --git a/cli/sync.go b/cli/sync.go index 89d2d94f0..18ff24bc2 100644 --- a/cli/sync.go +++ b/cli/sync.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" + cliutil "github.com/filecoin-project/lotus/cli/util" ) var SyncCmd = &cli.Command{ @@ -262,6 +263,9 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error { } firstApp = state.VMApplied + // eta computes the ETA for the sync to complete (with a lookback of 10 processed items) + eta := cliutil.NewETA(10) + for { state, err := napi.SyncState(ctx) if err != nil { @@ -312,8 +316,10 @@ func SyncWait(ctx context.Context, napi v0api.FullNode, watch bool) error { fmt.Print("\r\x1b[2K\x1b[A") } + todo := theight - ss.Height + fmt.Printf("Worker: %d; Base: %d; Target: %d (diff: %d)\n", workerID, baseHeight, theight, heightDiff) - fmt.Printf("State: %s; Current Epoch: %d; Todo: %d\n", ss.Stage, ss.Height, theight-ss.Height) + fmt.Printf("State: %s; Current Epoch: %d; Todo: %d, ETA: %s\n", ss.Stage, ss.Height, todo, eta.Update(int64(todo))) lastLines = 2 if i%samples == 0 { diff --git a/cli/util/eta.go b/cli/util/eta.go new file mode 100644 index 000000000..de06ec1ff --- /dev/null +++ b/cli/util/eta.go @@ -0,0 +1,94 @@ +package cliutil + +import ( + "fmt" + "math" + "time" +) + +// 
ETA implements a very simple eta calculator based on the number of remaining items. It does not +// require knowing the work size in advance and is therefore suitable for streaming workloads and +// also does not require that consecutive updates have a monotonically decreasing remaining value. +type ETA struct { + // max number of items to keep in memory + maxItems int + // a queue of most recently updated items + items []item + // we store the last calculated ETA which we reuse if there was not change in remaining items + lastETA string +} + +type item struct { + timestamp time.Time + remaining int64 +} + +// NewETA creates a new ETA calculator of the given size +func NewETA(maxItems int) *ETA { + return &ETA{ + maxItems: maxItems, + items: make([]item, 0), + } +} + +// Update updates the ETA calculator with the remaining number of items and returns the ETA +func (e *ETA) Update(remaining int64) string { + item := item{ + timestamp: time.Now(), + remaining: remaining, + } + + if len(e.items) == 0 { + e.items = append(e.items, item) + return "" + } + + if e.items[len(e.items)-1].remaining == remaining { + // we ignore updates with the same remaining value and just return the previous ETA + return e.lastETA + } else if e.items[len(e.items)-1].remaining < remaining { + // remaining went up from previous update, lets estimate how many items were processed using the + // average number processed items in the queue. + var avgProcessedPerItem int64 = 1 + if len(e.items) > 1 { + diffRemaining := e.items[0].remaining - e.items[len(e.items)-1].remaining + avgProcessedPerItem = int64(math.Round(float64(diffRemaining) / float64(len(e.items)))) + } + + // diff is the difference in increase in remaining since last update plus the average number of processed + // items we estimate that were processed this round + diff := remaining - e.items[len(e.items)-1].remaining + avgProcessedPerItem + + // we update all items in the queue by shifting their remaining value accordingly. 
This means that we + // always have strictly decreasing remaining values in the queue + for i := range e.items { + e.items[i].remaining += diff + } + } + + // append the item to the queue and remove the oldest item if needed + if len(e.items) >= e.maxItems { + e.items = e.items[1:] + } + e.items = append(e.items, item) + + // calculate the average processing time per item in the queue + diffMs := e.items[len(e.items)-1].timestamp.Sub(e.items[0].timestamp).Milliseconds() + nrItemsProcessed := e.items[0].remaining - e.items[len(e.items)-1].remaining + avg := diffMs / nrItemsProcessed + + // use that average processing time to estimate how long the remaining items will take + // and cache that ETA so we don't have to recalculate it on every update unless the + // remaining value changes + e.lastETA = msToETA(avg * remaining) + + return e.lastETA +} + +func msToETA(ms int64) string { + seconds := ms / 1000 + minutes := seconds / 60 + hours := minutes / 60 + + return fmt.Sprintf("%02dh:%02dm:%02ds", hours, minutes%60, seconds%60) +} diff --git a/cli/wallet.go b/cli/wallet.go index 628d6841d..faf7bc239 100644 --- a/cli/wallet.go +++ b/cli/wallet.go @@ -22,6 +22,7 @@ import ( "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/tablewriter" ) @@ -459,7 +460,12 @@ var walletSign = &cli.Command{ sig, err := api.WalletSign(ctx, addr, msg) if err != nil { - return err + // Check if the address is a multisig address + act, actErr := api.StateGetActor(ctx, addr, types.EmptyTSK) + if actErr == nil && builtin.IsMultisigActor(act.Code) { + return xerrors.Errorf("specified signer address is a multisig actor, it doesn’t have keys to sign transactions. 
To send a message with a multisig, signers of the multisig need to propose and approve transactions.") + } + return xerrors.Errorf("failed to sign message: %w", err) } sigBytes := append([]byte{byte(sig.Type)}, sig.Data...) diff --git a/cmd/lotus-bench/cli.go b/cmd/lotus-bench/cli.go new file mode 100644 index 000000000..0eaeb6ccb --- /dev/null +++ b/cmd/lotus-bench/cli.go @@ -0,0 +1,312 @@ +package main + +import ( + "errors" + "fmt" + "io" + "os" + "os/exec" + "os/signal" + "strconv" + "strings" + "sync" + "time" + + "github.com/urfave/cli/v2" +) + +var cliCmd = &cli.Command{ + Name: "cli", + Usage: "Runs a concurrent stress test on one or more binaries commands and prints the performance metrics including latency distribution and histogram", + Description: `This benchmark has the following features: +* Can query each command both sequentially and concurrently +* Supports rate limiting +* Can query multiple different commands at once (supporting different concurrency level and rate limiting for each command) +* Gives a nice reporting summary of the stress testing of each command (including latency distribution, histogram and more) +* Easy to use + +To use this benchmark you must specify the commands you want to test using the --cmd options, the format of it is: + + --cmd=CMD[:CONCURRENCY][:QPS] where only NAME is required. 
+ +Here are some real examples: + lotus-bench cli --cmd='lotus-shed mpool miner-select-messages' // runs the command with default concurrency and qps + lotus-bench cli --cmd='lotus-shed mpool miner-select-messages:3' // override concurrency to 3 + lotus-bench cli --cmd='lotus-shed mpool miner-select-messages::100' // override to 100 qps while using default concurrency + lotus-bench cli --cmd='lotus-shed mpool miner-select-messages:3:100' // run using 3 workers but limit to 100 qps + lotus-bench cli --cmd='lotus-shed mpool miner-select-messages' --cmd='lotus sync wait' // run two commands at once +`, + Flags: []cli.Flag{ + &cli.DurationFlag{ + Name: "duration", + Value: 60 * time.Second, + Usage: "Duration of benchmark in seconds", + }, + &cli.IntFlag{ + Name: "concurrency", + Value: 10, + Usage: "How many workers should be used per command (can be overridden per command)", + }, + &cli.IntFlag{ + Name: "qps", + Value: 0, + Usage: "How many requests per second should be sent per command (can be overridden per command), a value of 0 means no limit", + }, + &cli.StringSliceFlag{ + Name: "cmd", + Usage: `Command to benchmark, you can specify multiple commands by repeating this flag. 
You can also specify command specific options to set the concurrency and qps for each command (see usage).`, + }, + &cli.DurationFlag{ + Name: "watch", + Value: 0 * time.Second, + Usage: "If >0 then generates reports every N seconds (only supports linux/unix)", + }, + &cli.BoolFlag{ + Name: "print-response", + Value: false, + Usage: "print the response of each request", + }, + }, + Action: func(cctx *cli.Context) error { + if len(cctx.StringSlice("cmd")) == 0 { + return errors.New("you must specify and least one cmd to benchmark") + } + + var cmds []*CMD + for _, str := range cctx.StringSlice("cmd") { + entries := strings.SplitN(str, ":", 3) + if len(entries) == 0 { + return errors.New("invalid cmd format") + } + + // check if concurrency was specified + concurrency := cctx.Int("concurrency") + if len(entries) > 1 { + if len(entries[1]) > 0 { + var err error + concurrency, err = strconv.Atoi(entries[1]) + if err != nil { + return fmt.Errorf("could not parse concurrency value from command %s: %v", entries[0], err) + } + } + } + + // check if qps was specified + qps := cctx.Int("qps") + if len(entries) > 2 { + if len(entries[2]) > 0 { + var err error + qps, err = strconv.Atoi(entries[2]) + if err != nil { + return fmt.Errorf("could not parse qps value from command %s: %v", entries[0], err) + } + } + } + + cmds = append(cmds, &CMD{ + w: os.Stdout, + cmd: entries[0], + concurrency: concurrency, + qps: qps, + printResp: cctx.Bool("print-response"), + }) + } + + // terminate early on ctrl+c + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + go func() { + <-c + fmt.Println("Received interrupt, stopping...") + for _, cmd := range cmds { + cmd.Stop() + } + }() + + // stop all threads after duration + go func() { + time.Sleep(cctx.Duration("duration")) + for _, cmd := range cmds { + cmd.Stop() + } + }() + + // start all threads + var wg sync.WaitGroup + wg.Add(len(cmds)) + + for _, cmd := range cmds { + go func(cmd *CMD) { + defer wg.Done() + err := cmd.Run() 
+ if err != nil { + fmt.Printf("error running cmd: %v\n", err) + } + }(cmd) + } + + // if watch is set then print a report every N seconds + var progressCh chan struct{} + if cctx.Duration("watch") > 0 { + progressCh = make(chan struct{}, 1) + go func(progressCh chan struct{}) { + ticker := time.NewTicker(cctx.Duration("watch")) + for { + clearAndPrintReport := func() { + // clear the screen move the cursor to the top left + fmt.Print("\033[2J") + fmt.Printf("\033[%d;%dH", 1, 1) + for i, cmd := range cmds { + cmd.Report() + if i < len(cmds)-1 { + fmt.Println() + } + } + } + select { + case <-ticker.C: + clearAndPrintReport() + case <-progressCh: + clearAndPrintReport() + return + } + } + }(progressCh) + } + + wg.Wait() + + if progressCh != nil { + // wait for the watch go routine to return + progressCh <- struct{}{} + + // no need to print the report again + return nil + } + + // print the report for each command + for i, cmd := range cmds { + cmd.Report() + if i < len(cmds)-1 { + fmt.Println() + } + } + + return nil + }, +} + +// CMD handles the benchmarking of a single command. 
+type CMD struct { + w io.Writer + // the cmd we want to benchmark + cmd string + // the number of concurrent requests to make to this command + concurrency int + // if >0 then limit to qps is the max number of requests per second to make to this command (0 = no limit) + qps int + // whether or not to print the response of each request (useful for debugging) + printResp bool + // instruct the worker go routines to stop + stopCh chan struct{} + // when the command bencharking started + start time.Time + // results channel is used by the workers to send results to the reporter + results chan *result + // reporter handles reading the results from workers and printing the report statistics + reporter *Reporter +} + +func (c *CMD) Run() error { + var wg sync.WaitGroup + wg.Add(c.concurrency) + + c.results = make(chan *result, c.concurrency*1_000) + c.stopCh = make(chan struct{}, c.concurrency) + + go func() { + c.reporter = NewReporter(c.results, c.w) + c.reporter.Run() + }() + + c.start = time.Now() + + // throttle the number of requests per second + var qpsTicker *time.Ticker + if c.qps > 0 { + qpsTicker = time.NewTicker(time.Second / time.Duration(c.qps)) + } + + for i := 0; i < c.concurrency; i++ { + go func() { + c.startWorker(qpsTicker) + wg.Done() + }() + } + wg.Wait() + + // close the results channel so reporter will stop + close(c.results) + + // wait until the reporter is done + <-c.reporter.doneCh + + return nil +} + +func (c *CMD) startWorker(qpsTicker *time.Ticker) { + for { + // check if we should stop + select { + case <-c.stopCh: + return + default: + } + + // wait for the next tick if we are rate limiting this command + if qpsTicker != nil { + <-qpsTicker.C + } + + start := time.Now() + + var statusCode int = 0 + + arr := strings.Fields(c.cmd) + + data, err := exec.Command(arr[0], arr[1:]...).Output() + if err != nil { + fmt.Println("1") + if exitError, ok := err.(*exec.ExitError); ok { + statusCode = exitError.ExitCode() + } else { + statusCode = 1 + } 
+ } else { + if c.printResp { + fmt.Printf("[%s] %s", c.cmd, string(data)) + } + } + + c.results <- &result{ + statusCode: &statusCode, + err: err, + duration: time.Since(start), + } + } +} + +func (c *CMD) Stop() { + for i := 0; i < c.concurrency; i++ { + c.stopCh <- struct{}{} + } +} + +func (c *CMD) Report() { + total := time.Since(c.start) + fmt.Fprintf(c.w, "[%s]:\n", c.cmd) + fmt.Fprintf(c.w, "- Options:\n") + fmt.Fprintf(c.w, " - concurrency: %d\n", c.concurrency) + fmt.Fprintf(c.w, " - qps: %d\n", c.qps) + c.reporter.Print(total, c.w) +} diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index 3b6f7ddae..7d3c0cde0 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -120,6 +120,7 @@ func main() { sealBenchCmd, simpleCmd, importBenchCmd, + cliCmd, rpcCmd, }, } @@ -555,7 +556,7 @@ var sealBenchCmd = &cli.Command{ if !skipc2 { log.Info("generating winning post candidates") - wipt, err := spt(sectorSize).RegisteredWinningPoStProof() + wipt, err := spt(sectorSize, false).RegisteredWinningPoStProof() if err != nil { return err } @@ -773,7 +774,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par Miner: mid, Number: i, }, - ProofType: spt(sectorSize), + ProofType: spt(sectorSize, false), } start := time.Now() @@ -803,7 +804,7 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par Miner: mid, Number: i, }, - ProofType: spt(sectorSize), + ProofType: spt(sectorSize, false), } start := time.Now() @@ -1014,7 +1015,7 @@ var proveCmd = &cli.Command{ Miner: abi.ActorID(mid), Number: abi.SectorNumber(c2in.SectorNum), }, - ProofType: spt(abi.SectorSize(c2in.SectorSize)), + ProofType: spt(abi.SectorSize(c2in.SectorSize), false), } fmt.Printf("----\nstart proof computation\n") @@ -1045,8 +1046,8 @@ func bps(sectorSize abi.SectorSize, sectorNum int, d time.Duration) string { return types.SizeStr(types.BigInt{Int: bps}) + "/s" } -func spt(ssize abi.SectorSize) abi.RegisteredSealProof { - 
spt, err := miner.SealProofTypeFromSectorSize(ssize, build.TestNetworkVersion) +func spt(ssize abi.SectorSize, synth bool) abi.RegisteredSealProof { + spt, err := miner.SealProofTypeFromSectorSize(ssize, build.TestNetworkVersion, synth) if err != nil { panic(err) } diff --git a/cmd/lotus-bench/reporter.go b/cmd/lotus-bench/reporter.go new file mode 100644 index 000000000..ad2ad6b9d --- /dev/null +++ b/cmd/lotus-bench/reporter.go @@ -0,0 +1,181 @@ +package main + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" + "text/tabwriter" + "time" +) + +// result is the result of a single rpc method request. +type result struct { + err error + statusCode *int + duration time.Duration +} + +// Reporter reads the results from the workers through the results channel and aggregates the results. +type Reporter struct { + // write the report to this writer + w io.Writer + // the reporter read the results from this channel + results chan *result + // doneCh is used to signal that the reporter has finished reading the results (channel has closed) + doneCh chan bool + + // lock protect the following fields during critical sections (if --watch was specified) + lock sync.Mutex + // the latencies of all requests + latencies []int64 + // the number of requests that returned each status code + statusCodes map[int]int + // the number of errors that occurred + errors map[string]int +} + +func NewReporter(results chan *result, w io.Writer) *Reporter { + return &Reporter{ + w: w, + results: results, + doneCh: make(chan bool, 1), + statusCodes: make(map[int]int), + errors: make(map[string]int), + } +} + +func (r *Reporter) Run() { + for res := range r.results { + r.lock.Lock() + + r.latencies = append(r.latencies, res.duration.Milliseconds()) + + if res.statusCode != nil { + r.statusCodes[*res.statusCode]++ + } + + if res.err != nil { + if len(r.errors) < 1_000_000 { + r.errors[res.err.Error()]++ + } else { + // we don't want to store too many errors in memory + r.errors["hidden"]++ + } 
+ } else { + r.errors["nil"]++ + } + + r.lock.Unlock() + } + + r.doneCh <- true +} + +func (r *Reporter) Print(elapsed time.Duration, w io.Writer) { + r.lock.Lock() + defer r.lock.Unlock() + + nrReq := int64(len(r.latencies)) + if nrReq == 0 { + fmt.Println("No requests were made") + return + } + + // we need to sort the latencies slice to calculate the percentiles + sort.Slice(r.latencies, func(i, j int) bool { + return r.latencies[i] < r.latencies[j] + }) + + var totalLatency int64 = 0 + for _, latency := range r.latencies { + totalLatency += latency + } + + fmt.Fprintf(w, "- Total Requests: %d\n", nrReq) + fmt.Fprintf(w, "- Total Duration: %dms\n", elapsed.Milliseconds()) + fmt.Fprintf(w, "- Requests/sec: %f\n", float64(nrReq)/elapsed.Seconds()) + fmt.Fprintf(w, "- Avg latency: %dms\n", totalLatency/nrReq) + fmt.Fprintf(w, "- Median latency: %dms\n", r.latencies[nrReq/2]) + fmt.Fprintf(w, "- Latency distribution:\n") + percentiles := []float64{0.1, 0.5, 0.9, 0.95, 0.99, 0.999} + for _, p := range percentiles { + idx := int64(p * float64(nrReq)) + fmt.Fprintf(w, " %s%% in %dms\n", fmt.Sprintf("%.2f", p*100.0), r.latencies[idx]) + } + + // create a simple histogram with 10 buckets spanning the range of latency + // into equal ranges + // + nrBucket := 10 + buckets := make([]Bucket, nrBucket) + latencyRange := r.latencies[len(r.latencies)-1] + bucketRange := latencyRange / int64(nrBucket) + + // mark the end of each bucket + for i := 0; i < nrBucket; i++ { + buckets[i].start = int64(i) * bucketRange + buckets[i].end = buckets[i].start + bucketRange + // extend the last bucked by any remaning range caused by the integer division + if i == nrBucket-1 { + buckets[i].end = latencyRange + } + } + + // count the number of requests in each bucket + currBucket := 0 + for i := 0; i < len(r.latencies); { + if r.latencies[i] <= buckets[currBucket].end { + buckets[currBucket].cnt++ + i++ + } else { + currBucket++ + } + } + + // print the histogram using a tabwriter which will 
align the columns nicely + fmt.Fprintf(w, "- Histogram:\n") + const padding = 2 + tabWriter := tabwriter.NewWriter(w, 0, 0, padding, ' ', tabwriter.AlignRight|tabwriter.Debug) + for i := 0; i < nrBucket; i++ { + ratio := float64(buckets[i].cnt) / float64(nrReq) + bars := strings.Repeat("#", int(ratio*100)) + fmt.Fprintf(tabWriter, " %d-%dms\t%d\t%s (%s%%)\n", buckets[i].start, buckets[i].end, buckets[i].cnt, bars, fmt.Sprintf("%.2f", ratio*100)) + } + tabWriter.Flush() //nolint:errcheck + + fmt.Fprintf(w, "- Status codes:\n") + for code, cnt := range r.statusCodes { + fmt.Fprintf(w, " [%d]: %d\n", code, cnt) + } + + // print the 10 most occurring errors (in case error values are not unique) + // + type kv struct { + err string + cnt int + } + var sortedErrors []kv + for err, cnt := range r.errors { + sortedErrors = append(sortedErrors, kv{err, cnt}) + } + sort.Slice(sortedErrors, func(i, j int) bool { + return sortedErrors[i].cnt > sortedErrors[j].cnt + }) + fmt.Fprintf(w, "- Errors (top 10):\n") + for i, se := range sortedErrors { + if i > 10 { + break + } + fmt.Fprintf(w, " [%s]: %d\n", se.err, se.cnt) + } +} + +type Bucket struct { + start int64 + // the end value of the bucket + end int64 + // how many entries are in the bucket + cnt int +} diff --git a/cmd/lotus-bench/rpc.go b/cmd/lotus-bench/rpc.go index 5da784c6e..4af4bdb27 100644 --- a/cmd/lotus-bench/rpc.go +++ b/cmd/lotus-bench/rpc.go @@ -9,11 +9,9 @@ import ( "net/http" "os" "os/signal" - "sort" "strconv" "strings" "sync" - "text/tabwriter" "time" "github.com/urfave/cli/v2" @@ -243,13 +241,6 @@ type RPCMethod struct { reporter *Reporter } -// result is the result of a single rpc method request. 
-type result struct { - err error - statusCode *int - duration time.Duration -} - func (rpc *RPCMethod) Run() error { client := &http.Client{ Timeout: 0, @@ -411,166 +402,3 @@ func (rpc *RPCMethod) Report() { fmt.Fprintf(rpc.w, " - qps: %d\n", rpc.qps) rpc.reporter.Print(total, rpc.w) } - -// Reporter reads the results from the workers through the results channel and aggregates the results. -type Reporter struct { - // write the report to this writer - w io.Writer - // the reporter read the results from this channel - results chan *result - // doneCh is used to signal that the reporter has finished reading the results (channel has closed) - doneCh chan bool - - // lock protect the following fields during critical sections (if --watch was specified) - lock sync.Mutex - // the latencies of all requests - latencies []int64 - // the number of requests that returned each status code - statusCodes map[int]int - // the number of errors that occurred - errors map[string]int -} - -func NewReporter(results chan *result, w io.Writer) *Reporter { - return &Reporter{ - w: w, - results: results, - doneCh: make(chan bool, 1), - statusCodes: make(map[int]int), - errors: make(map[string]int), - } -} - -func (r *Reporter) Run() { - for res := range r.results { - r.lock.Lock() - - r.latencies = append(r.latencies, res.duration.Milliseconds()) - - if res.statusCode != nil { - r.statusCodes[*res.statusCode]++ - } - - if res.err != nil { - if len(r.errors) < 1_000_000 { - r.errors[res.err.Error()]++ - } else { - // we don't want to store too many errors in memory - r.errors["hidden"]++ - } - } else { - r.errors["nil"]++ - } - - r.lock.Unlock() - } - - r.doneCh <- true -} - -func (r *Reporter) Print(elapsed time.Duration, w io.Writer) { - r.lock.Lock() - defer r.lock.Unlock() - - nrReq := int64(len(r.latencies)) - if nrReq == 0 { - fmt.Println("No requests were made") - return - } - - // we need to sort the latencies slice to calculate the percentiles - sort.Slice(r.latencies, func(i, j 
int) bool { - return r.latencies[i] < r.latencies[j] - }) - - var totalLatency int64 = 0 - for _, latency := range r.latencies { - totalLatency += latency - } - - fmt.Fprintf(w, "- Total Requests: %d\n", nrReq) - fmt.Fprintf(w, "- Total Duration: %dms\n", elapsed.Milliseconds()) - fmt.Fprintf(w, "- Requests/sec: %f\n", float64(nrReq)/elapsed.Seconds()) - fmt.Fprintf(w, "- Avg latency: %dms\n", totalLatency/nrReq) - fmt.Fprintf(w, "- Median latency: %dms\n", r.latencies[nrReq/2]) - fmt.Fprintf(w, "- Latency distribution:\n") - percentiles := []float64{0.1, 0.5, 0.9, 0.95, 0.99, 0.999} - for _, p := range percentiles { - idx := int64(p * float64(nrReq)) - fmt.Fprintf(w, " %s%% in %dms\n", fmt.Sprintf("%.2f", p*100.0), r.latencies[idx]) - } - - // create a simple histogram with 10 buckets spanning the range of latency - // into equal ranges - // - nrBucket := 10 - buckets := make([]Bucket, nrBucket) - latencyRange := r.latencies[len(r.latencies)-1] - bucketRange := latencyRange / int64(nrBucket) - - // mark the end of each bucket - for i := 0; i < nrBucket; i++ { - buckets[i].start = int64(i) * bucketRange - buckets[i].end = buckets[i].start + bucketRange - // extend the last bucked by any remaning range caused by the integer division - if i == nrBucket-1 { - buckets[i].end = latencyRange - } - } - - // count the number of requests in each bucket - currBucket := 0 - for i := 0; i < len(r.latencies); { - if r.latencies[i] <= buckets[currBucket].end { - buckets[currBucket].cnt++ - i++ - } else { - currBucket++ - } - } - - // print the histogram using a tabwriter which will align the columns nicely - fmt.Fprintf(w, "- Histogram:\n") - const padding = 2 - tabWriter := tabwriter.NewWriter(w, 0, 0, padding, ' ', tabwriter.AlignRight|tabwriter.Debug) - for i := 0; i < nrBucket; i++ { - ratio := float64(buckets[i].cnt) / float64(nrReq) - bars := strings.Repeat("#", int(ratio*100)) - fmt.Fprintf(tabWriter, " %d-%dms\t%d\t%s (%s%%)\n", buckets[i].start, buckets[i].end, 
buckets[i].cnt, bars, fmt.Sprintf("%.2f", ratio*100)) - } - tabWriter.Flush() //nolint:errcheck - - fmt.Fprintf(w, "- Status codes:\n") - for code, cnt := range r.statusCodes { - fmt.Fprintf(w, " [%d]: %d\n", code, cnt) - } - - // print the 10 most occurring errors (in case error values are not unique) - // - type kv struct { - err string - cnt int - } - var sortedErrors []kv - for err, cnt := range r.errors { - sortedErrors = append(sortedErrors, kv{err, cnt}) - } - sort.Slice(sortedErrors, func(i, j int) bool { - return sortedErrors[i].cnt > sortedErrors[j].cnt - }) - fmt.Fprintf(w, "- Errors (top 10):\n") - for i, se := range sortedErrors { - if i > 10 { - break - } - fmt.Fprintf(w, " [%s]: %d\n", se.err, se.cnt) - } -} - -type Bucket struct { - start int64 - // the end value of the bucket - end int64 - // how many entries are in the bucket - cnt int -} diff --git a/cmd/lotus-bench/simple.go b/cmd/lotus-bench/simple.go index 6e1dea0db..8ae5713ad 100644 --- a/cmd/lotus-bench/simple.go +++ b/cmd/lotus-bench/simple.go @@ -186,7 +186,7 @@ var simpleAddPiece = &cli.Command{ Miner: mid, Number: 1, }, - ProofType: spt(sectorSize), + ProofType: spt(sectorSize, false), } data, err := os.Open(cctx.Args().First()) @@ -223,6 +223,10 @@ var simplePreCommit1 = &cli.Command{ Usage: "pass miner address (only necessary if using existing sectorbuilder)", Value: "t01000", }, + &cli.BoolFlag{ + Name: "synthetic", + Usage: "generate synthetic PoRep proofs", + }, }, ArgsUsage: "[unsealed] [sealed] [cache] [[piece cid] [piece size]]...", Action: func(cctx *cli.Context) error { @@ -259,7 +263,7 @@ var simplePreCommit1 = &cli.Command{ Miner: mid, Number: 1, }, - ProofType: spt(sectorSize), + ProofType: spt(sectorSize, cctx.Bool("synthetic")), } var ticket [32]byte // all zero @@ -297,6 +301,10 @@ var simplePreCommit2 = &cli.Command{ Usage: "pass miner address (only necessary if using existing sectorbuilder)", Value: "t01000", }, + &cli.BoolFlag{ + Name: "synthetic", + Usage: "generate 
synthetic PoRep proofs", + }, }, ArgsUsage: "[sealed] [cache] [pc1 out]", Action: func(cctx *cli.Context) error { @@ -337,7 +345,7 @@ var simplePreCommit2 = &cli.Command{ Miner: mid, Number: 1, }, - ProofType: spt(sectorSize), + ProofType: spt(sectorSize, cctx.Bool("synthetic")), } start := time.Now() @@ -368,6 +376,10 @@ var simpleCommit1 = &cli.Command{ Usage: "pass miner address (only necessary if using existing sectorbuilder)", Value: "t01000", }, + &cli.BoolFlag{ + Name: "synthetic", + Usage: "generate synthetic PoRep proofs", + }, }, ArgsUsage: "[sealed] [cache] [comm D] [comm R] [c1out.json]", Action: func(cctx *cli.Context) error { @@ -403,7 +415,7 @@ var simpleCommit1 = &cli.Command{ Miner: mid, Number: 1, }, - ProofType: spt(sectorSize), + ProofType: spt(sectorSize, cctx.Bool("synthetic")), } start := time.Now() @@ -469,6 +481,10 @@ var simpleCommit2 = &cli.Command{ Usage: "pass miner address (only necessary if using existing sectorbuilder)", Value: "t01000", }, + &cli.BoolFlag{ + Name: "synthetic", + Usage: "generate synthetic PoRep proofs", + }, }, Action: func(c *cli.Context) error { if c.Bool("no-gpu") { @@ -515,7 +531,7 @@ var simpleCommit2 = &cli.Command{ Miner: abi.ActorID(mid), Number: abi.SectorNumber(c2in.SectorNum), }, - ProofType: spt(abi.SectorSize(c2in.SectorSize)), + ProofType: spt(abi.SectorSize(c2in.SectorSize), c.Bool("synthetic")), } start := time.Now() @@ -573,7 +589,7 @@ var simpleWindowPost = &cli.Command{ return xerrors.Errorf("parse commr: %w", err) } - wpt, err := spt(sectorSize).RegisteredWindowPoStProof() + wpt, err := spt(sectorSize, false).RegisteredWindowPoStProof() if err != nil { return err } @@ -593,7 +609,7 @@ var simpleWindowPost = &cli.Command{ vp, err := ffi.GenerateSingleVanillaProof(ffi.PrivateSectorInfo{ SectorInfo: prf.SectorInfo{ - SealProof: spt(sectorSize), + SealProof: spt(sectorSize, false), SectorNumber: sn, SealedCID: commr, }, @@ -660,7 +676,7 @@ var simpleWinningPost = &cli.Command{ return 
xerrors.Errorf("parse commr: %w", err) } - wpt, err := spt(sectorSize).RegisteredWinningPoStProof() + wpt, err := spt(sectorSize, false).RegisteredWinningPoStProof() if err != nil { return err } @@ -680,7 +696,7 @@ var simpleWinningPost = &cli.Command{ vp, err := ffi.GenerateSingleVanillaProof(ffi.PrivateSectorInfo{ SectorInfo: prf.SectorInfo{ - SealProof: spt(sectorSize), + SealProof: spt(sectorSize, false), SectorNumber: sn, SealedCID: commr, }, @@ -763,7 +779,7 @@ var simpleReplicaUpdate = &cli.Command{ Miner: mid, Number: 1, }, - ProofType: spt(sectorSize), + ProofType: spt(sectorSize, false), } start := time.Now() @@ -831,7 +847,7 @@ var simpleProveReplicaUpdate1 = &cli.Command{ Miner: mid, Number: 1, }, - ProofType: spt(sectorSize), + ProofType: spt(sectorSize, false), } start := time.Now() @@ -918,7 +934,7 @@ var simpleProveReplicaUpdate2 = &cli.Command{ Miner: mid, Number: 1, }, - ProofType: spt(sectorSize), + ProofType: spt(sectorSize, false), } start := time.Now() diff --git a/cmd/lotus-gateway/main.go b/cmd/lotus-gateway/main.go index 2023551ef..35a43e18b 100644 --- a/cmd/lotus-gateway/main.go +++ b/cmd/lotus-gateway/main.go @@ -143,7 +143,7 @@ var runCmd = &cli.Command{ }, &cli.DurationFlag{ Name: "rate-limit-timeout", - Usage: "the maximum time to wait for the rate limter before returning an error to clients", + Usage: "the maximum time to wait for the rate limiter before returning an error to clients", Value: gateway.DefaultRateLimitTimeout, }, &cli.Int64Flag{ diff --git a/cmd/lotus-miner/actor.go b/cmd/lotus-miner/actor.go index f0c52278a..320c4e6de 100644 --- a/cmd/lotus-miner/actor.go +++ b/cmd/lotus-miner/actor.go @@ -21,6 +21,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" + minerV12 "github.com/filecoin-project/go-state-types/builtin/v12/miner" "github.com/filecoin-project/go-state-types/builtin/v9/miner" 
"github.com/filecoin-project/go-state-types/network" @@ -49,6 +50,7 @@ var actorCmd = &cli.Command{ actorProposeChangeWorker, actorConfirmChangeWorker, actorCompactAllocatedCmd, + actorMovePartitionsCmd, actorProposeChangeBeneficiary, actorConfirmChangeBeneficiary, }, @@ -1286,13 +1288,149 @@ var actorConfirmChangeBeneficiary = &cli.Command{ }, } +var actorMovePartitionsCmd = &cli.Command{ + Name: "move-partitions", + Usage: "move deadline of specified partitions from one to another", + Flags: []cli.Flag{ + &cli.Int64SliceFlag{ + Name: "partition-indices", + Usage: "Indices of partitions to update, separated by comma", + }, + &cli.Uint64Flag{ + Name: "orig-deadline", + Usage: "Deadline to move partition from", + }, + &cli.Uint64Flag{ + Name: "dest-deadline", + Usage: "Deadline to move partition to", + }, + &cli.BoolFlag{ + Name: "really-do-it", + Usage: "Actually send transaction performing the action", + Value: false, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Bool("really-do-it") { + fmt.Println("Pass --really-do-it to actually execute this action") + return nil + } + + if cctx.Args().Present() { + return fmt.Errorf("please use flags to provide arguments") + } + + ctx := lcli.ReqContext(cctx) + + minerApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + + maddr, err := minerApi.ActorAddress(ctx) + if err != nil { + return err + } + + fmt.Printf("Miner: %s\n", color.BlueString("%s", maddr)) + + fullNodeApi, acloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer acloser() + + minfo, err := fullNodeApi.StateMinerInfo(ctx, maddr, types.EmptyTSK) + if err != nil { + return err + } + + origDeadline := cctx.Uint64("orig-deadline") + if origDeadline > miner.WPoStPeriodDeadlines { + return fmt.Errorf("orig-deadline %d out of range", origDeadline) + } + destDeadline := cctx.Uint64("dest-deadline") + if destDeadline > miner.WPoStPeriodDeadlines { + return 
fmt.Errorf("dest-deadline %d out of range", destDeadline) + } + if origDeadline == destDeadline { + return fmt.Errorf("dest-desdline cannot be the same as orig-deadline") + } + + partitions := cctx.Int64Slice("partition-indices") + if len(partitions) == 0 { + return fmt.Errorf("must include at least one partition to move") + } + + curPartitions, err := fullNodeApi.StateMinerPartitions(ctx, maddr, origDeadline, types.EmptyTSK) + if err != nil { + return fmt.Errorf("getting partitions for deadline %d: %w", origDeadline, err) + } + if len(partitions) > len(curPartitions) { + return fmt.Errorf("partition size(%d) cannot be bigger than current partition size(%d) for deadline %d", len(partitions), len(curPartitions), origDeadline) + } + + fmt.Printf("Moving %d paritions\n", len(partitions)) + + partitionsBf := bitfield.New() + for _, partition := range partitions { + if partition >= int64(len(curPartitions)) { + return fmt.Errorf("partition index(%d) doesn't exist", partition) + } + partitionsBf.Set(uint64(partition)) + } + + params := minerV12.MovePartitionsParams{ + OrigDeadline: origDeadline, + DestDeadline: destDeadline, + Partitions: partitionsBf, + } + + serializedParams, err := actors.SerializeParams(¶ms) + if err != nil { + return fmt.Errorf("serializing params: %w", err) + } + + smsg, err := fullNodeApi.MpoolPushMessage(ctx, &types.Message{ + From: minfo.Worker, + To: maddr, + Method: builtin.MethodsMiner.MovePartitions, + Value: big.Zero(), + Params: serializedParams, + }, nil) + if err != nil { + return fmt.Errorf("mpool push: %w", err) + } + + fmt.Println("MovePartitions Message CID:", smsg.Cid()) + + // wait for it to get mined into a block + fmt.Println("Waiting for block confirmation...") + wait, err := fullNodeApi.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode.IsError() { + fmt.Println("Moving partitions failed!") + return err + } + + 
fmt.Println("Move partition confirmed") + + return nil + }, +} + var actorCompactAllocatedCmd = &cli.Command{ Name: "compact-allocated", Usage: "compact allocated sectors bitfield", Flags: []cli.Flag{ &cli.Uint64Flag{ Name: "mask-last-offset", - Usage: "Mask sector IDs from 0 to 'higest_allocated - offset'", + Usage: "Mask sector IDs from 0 to 'highest_allocated - offset'", }, &cli.Uint64Flag{ Name: "mask-upto-n", diff --git a/cmd/lotus-miner/precommits-info.go b/cmd/lotus-miner/precommits-info.go index 0ce757537..3f9e8c927 100644 --- a/cmd/lotus-miner/precommits-info.go +++ b/cmd/lotus-miner/precommits-info.go @@ -7,7 +7,6 @@ import ( cbor "github.com/ipfs/go-ipld-cbor" "github.com/urfave/cli/v2" - minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/specs-actors/v7/actors/util/adt" "github.com/filecoin-project/lotus/blockstore" @@ -39,8 +38,8 @@ var sectorPreCommitsCmd = &cli.Command{ if err != nil { return err } - preCommitSector := make([]minertypes.SectorPreCommitOnChainInfo, 0) - err = mst.ForEachPrecommittedSector(func(info minertypes.SectorPreCommitOnChainInfo) error { + preCommitSector := make([]miner.SectorPreCommitOnChainInfo, 0) + err = mst.ForEachPrecommittedSector(func(info miner.SectorPreCommitOnChainInfo) error { preCommitSector = append(preCommitSector, info) return err }) diff --git a/cmd/lotus-miner/sectors.go b/cmd/lotus-miner/sectors.go index 3993ecf75..07cc2e795 100644 --- a/cmd/lotus-miner/sectors.go +++ b/cmd/lotus-miner/sectors.go @@ -24,14 +24,13 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors" 
"github.com/filecoin-project/lotus/chain/actors/adt" - lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" @@ -193,14 +192,14 @@ var sectorsStatusCmd = &cli.Command{ } tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullApi), blockstore.NewMemory()) - mas, err := lminer.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact) + mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact) if err != nil { return err } errFound := errors.New("found") - if err := mas.ForEachDeadline(func(dlIdx uint64, dl lminer.Deadline) error { - return dl.ForEachPartition(func(partIdx uint64, part lminer.Partition) error { + if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error { + return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error { pas, err := part.AllSectors() if err != nil { return err @@ -847,7 +846,12 @@ var sectorsCheckExpireCmd = &cli.Command{ for _, sector := range sectors { MaxExpiration := sector.Activation + policy.GetSectorMaxLifetime(sector.SealProof, nv) - MaxExtendNow := currEpoch + policy.GetMaxSectorExpirationExtension() + maxExtension, err := policy.GetMaxSectorExpirationExtension(nv) + if err != nil { + return xerrors.Errorf("failed to get max extension: %w", err) + } + + MaxExtendNow := currEpoch + maxExtension if MaxExtendNow > MaxExpiration { MaxExtendNow = MaxExpiration @@ -1075,22 +1079,22 @@ var sectorsExtendCmd = &cli.Command{ tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullApi), blockstore.NewMemory()) adtStore := adt.WrapStore(ctx, cbor.NewCborStore(tbs)) - mas, err := lminer.Load(adtStore, mact) + mas, err := miner.Load(adtStore, mact) if err != nil { return err } - activeSectorsLocation := 
make(map[abi.SectorNumber]*lminer.SectorLocation, len(activeSet)) + activeSectorsLocation := make(map[abi.SectorNumber]*miner.SectorLocation, len(activeSet)) - if err := mas.ForEachDeadline(func(dlIdx uint64, dl lminer.Deadline) error { - return dl.ForEachPartition(func(partIdx uint64, part lminer.Partition) error { + if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error { + return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error { pas, err := part.ActiveSectors() if err != nil { return err } return pas.ForEach(func(i uint64) error { - activeSectorsLocation[abi.SectorNumber(i)] = &lminer.SectorLocation{ + activeSectorsLocation[abi.SectorNumber(i)] = &miner.SectorLocation{ Deadline: dlIdx, Partition: partIdx, } @@ -1177,7 +1181,7 @@ var sectorsExtendCmd = &cli.Command{ return diff <= abi.ChainEpoch(cctx.Int64("tolerance")) } - extensions := map[lminer.SectorLocation]map[abi.ChainEpoch][]abi.SectorNumber{} + extensions := map[miner.SectorLocation]map[abi.ChainEpoch][]abi.SectorNumber{} for _, si := range sis { extension := abi.ChainEpoch(cctx.Int64("extension")) newExp := si.Expiration + extension @@ -1186,7 +1190,12 @@ var sectorsExtendCmd = &cli.Command{ newExp = abi.ChainEpoch(cctx.Int64("new-expiration")) } - maxExtendNow := currEpoch + policy.GetMaxSectorExpirationExtension() + maxExtension, err := policy.GetMaxSectorExpirationExtension(nv) + if err != nil { + return xerrors.Errorf("failed to get max extension: %w", err) + } + + maxExtendNow := currEpoch + maxExtension if newExp > maxExtendNow { newExp = maxExtendNow } @@ -1741,7 +1750,7 @@ var sectorsCapacityCollateralCmd = &cli.Command{ return err } - spt, err := lminer.PreferredSealProofTypeFromWindowPoStType(nv, mi.WindowPoStProofType) + spt, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, mi.WindowPoStProofType, false) if err != nil { return err } @@ -1756,7 +1765,12 @@ var sectorsCapacityCollateralCmd = &cli.Command{ return err } - pci.Expiration = 
policy.GetMaxSectorExpirationExtension() + h.Height() + maxExtension, err := policy.GetMaxSectorExpirationExtension(nv) + if err != nil { + return xerrors.Errorf("failed to get max extension: %w", err) + } + + pci.Expiration = maxExtension + h.Height() } pc, err := nApi.StateMinerInitialPledgeCollateral(ctx, maddr, pci, types.EmptyTSK) @@ -1910,7 +1924,7 @@ var sectorsExpiredCmd = &cli.Command{ } tbs := blockstore.NewTieredBstore(blockstore.NewAPIBlockstore(fullApi), blockstore.NewMemory()) - mas, err := lminer.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact) + mas, err := miner.Load(adt.WrapStore(ctx, cbor.NewCborStore(tbs)), mact) if err != nil { return err } @@ -1926,8 +1940,8 @@ var sectorsExpiredCmd = &cli.Command{ return xerrors.Errorf("intersecting bitfields: %w", err) } - if err := mas.ForEachDeadline(func(dlIdx uint64, dl lminer.Deadline) error { - return dl.ForEachPartition(func(partIdx uint64, part lminer.Partition) error { + if err := mas.ForEachDeadline(func(dlIdx uint64, dl miner.Deadline) error { + return dl.ForEachPartition(func(partIdx uint64, part miner.Partition) error { live, err := part.LiveSectors() if err != nil { return err @@ -2552,6 +2566,8 @@ var sectorsUnsealCmd = &cli.Command{ return xerrors.Errorf("could not parse sector number: %w", err) } + fmt.Printf("Unsealing sector %d\n", sectorNum) + return minerAPI.SectorUnseal(ctx, abi.SectorNumber(sectorNum)) }, } diff --git a/cmd/lotus-provider/run.go b/cmd/lotus-provider/run.go index 16683c50e..d69435254 100644 --- a/cmd/lotus-provider/run.go +++ b/cmd/lotus-provider/run.go @@ -217,7 +217,6 @@ var runCmd = &cli.Command{ return err } - // todo fetch limit config stor := paths.NewRemote(localStore, si, http.Header(sa), 10, &paths.DefaultPartialFileHandler{}) wstates := statestore.New(dssync.MutexWrap(ds.NewMapDatastore())) diff --git a/cmd/lotus-seed/main.go b/cmd/lotus-seed/main.go index 863a508f2..d362804c9 100644 --- a/cmd/lotus-seed/main.go +++ b/cmd/lotus-seed/main.go @@ -137,7 
+137,9 @@ var preSealCmd = &cli.Command{ nv = network.Version(c.Uint64("network-version")) } - spt, err := miner.SealProofTypeFromSectorSize(sectorSize, nv) + var synthetic = false // there's little reason to have this for a seed. + + spt, err := miner.SealProofTypeFromSectorSize(sectorSize, nv, synthetic) if err != nil { return err } diff --git a/cmd/lotus-shed/invariants.go b/cmd/lotus-shed/invariants.go index 51d746f79..e74a0dd24 100644 --- a/cmd/lotus-shed/invariants.go +++ b/cmd/lotus-shed/invariants.go @@ -16,6 +16,7 @@ import ( "github.com/filecoin-project/go-state-types/builtin" v10 "github.com/filecoin-project/go-state-types/builtin/v10" v11 "github.com/filecoin-project/go-state-types/builtin/v11" + v12 "github.com/filecoin-project/go-state-types/builtin/v12" v8 "github.com/filecoin-project/go-state-types/builtin/v8" v9 "github.com/filecoin-project/go-state-types/builtin/v9" @@ -149,6 +150,13 @@ var invariantsCmd = &cli.Command{ if err != nil { return xerrors.Errorf("checking state invariants: %w", err) } + case actorstypes.Version12: + messages, err = v12.CheckStateInvariants(actorTree, abi.ChainEpoch(epoch), actorCodeCids) + if err != nil { + return xerrors.Errorf("checking state invariants: %w", err) + } + default: + return xerrors.Errorf("unsupported actor version: %v", av) } fmt.Println("completed, took ", time.Since(startTime)) diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index aab3d1ef3..a5b66a096 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "os/signal" + "runtime/pprof" logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" @@ -116,10 +117,31 @@ func main() { Name: "log-level", Value: "info", }, + &cli.StringFlag{ + Name: "pprof", + Usage: "specify name of file for writing cpu profile to", + }, }, Before: func(cctx *cli.Context) error { + if prof := cctx.String("pprof"); prof != "" { + profile, err := os.Create(prof) + if err != nil { + return err + } + + if err := 
pprof.StartCPUProfile(profile); err != nil { + return err + } + } + return logging.SetLogLevel("lotus-shed", cctx.String("log-level")) }, + After: func(cctx *cli.Context) error { + if prof := cctx.String("pprof"); prof != "" { + pprof.StopCPUProfile() + } + return nil + }, } // terminate early on ctrl+c diff --git a/cmd/lotus-shed/migrations.go b/cmd/lotus-shed/migrations.go index 82a1afddf..96e4747b7 100644 --- a/cmd/lotus-shed/migrations.go +++ b/cmd/lotus-shed/migrations.go @@ -21,6 +21,7 @@ import ( "github.com/filecoin-project/go-state-types/builtin" v10 "github.com/filecoin-project/go-state-types/builtin/v10" v11 "github.com/filecoin-project/go-state-types/builtin/v11" + v12 "github.com/filecoin-project/go-state-types/builtin/v12" market8 "github.com/filecoin-project/go-state-types/builtin/v8/market" adt8 "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" v9 "github.com/filecoin-project/go-state-types/builtin/v9" @@ -243,6 +244,8 @@ func getMigrationFuncsForNetwork(nv network.Version) (UpgradeActorsFunc, PreUpgr return filcns.UpgradeActorsV10, filcns.PreUpgradeActorsV10, checkNv18Invariants, nil case network.Version19: return filcns.UpgradeActorsV11, filcns.PreUpgradeActorsV11, checkNv19Invariants, nil + case network.Version21: + return filcns.UpgradeActorsV12, filcns.PreUpgradeActorsV12, checkNv21Invariants, nil default: return nil, nil, nil, xerrors.Errorf("migration not implemented for nv%d", nv) } @@ -252,6 +255,38 @@ type UpgradeActorsFunc = func(context.Context, *stmgr.StateManager, stmgr.Migrat type PreUpgradeActorsFunc = func(context.Context, *stmgr.StateManager, stmgr.MigrationCache, cid.Cid, abi.ChainEpoch, *types.TipSet) error type CheckInvariantsFunc = func(context.Context, cid.Cid, cid.Cid, blockstore.Blockstore, abi.ChainEpoch) error +func checkNv21Invariants(ctx context.Context, oldStateRootCid cid.Cid, newStateRootCid cid.Cid, bs blockstore.Blockstore, epoch abi.ChainEpoch) error { + + actorStore := store.ActorStore(ctx, bs) + 
startTime := time.Now() + + // Load the new state root. + var newStateRoot types.StateRoot + if err := actorStore.Get(ctx, newStateRootCid, &newStateRoot); err != nil { + return xerrors.Errorf("failed to decode state root: %w", err) + } + + actorCodeCids, err := actors.GetActorCodeIDs(actorstypes.Version12) + if err != nil { + return err + } + newActorTree, err := builtin.LoadTree(actorStore, newStateRoot.Actors) + if err != nil { + return err + } + messages, err := v12.CheckStateInvariants(newActorTree, epoch, actorCodeCids) + if err != nil { + return xerrors.Errorf("checking state invariants: %w", err) + } + + for _, message := range messages.Messages() { + fmt.Println("got the following error: ", message) + } + + fmt.Println("completed invariant checks, took ", time.Since(startTime)) + + return nil +} func checkNv19Invariants(ctx context.Context, oldStateRootCid cid.Cid, newStateRootCid cid.Cid, bs blockstore.Blockstore, epoch abi.ChainEpoch) error { actorStore := store.ActorStore(ctx, bs) diff --git a/cmd/lotus-shed/mpool.go b/cmd/lotus-shed/mpool.go index cfbff2abd..6b210bbc1 100644 --- a/cmd/lotus-shed/mpool.go +++ b/cmd/lotus-shed/mpool.go @@ -2,6 +2,7 @@ package main import ( "fmt" + "time" "github.com/urfave/cli/v2" @@ -43,10 +44,20 @@ var minerSelectMsgsCmd = &cli.Command{ return err } + // Get the size of the mempool + pendingMsgs, err := api.MpoolPending(ctx, types.EmptyTSK) + if err != nil { + return err + } + mpoolSize := len(pendingMsgs) + + // Measure the time taken by MpoolSelect + startTime := time.Now() msgs, err := api.MpoolSelect(ctx, head.Key(), cctx.Float64("ticket-quality")) if err != nil { return err } + duration := time.Since(startTime) var totalGas int64 for i, f := range msgs { @@ -64,6 +75,9 @@ var minerSelectMsgsCmd = &cli.Command{ totalGas += f.Message.GasLimit } + // Log the duration, size of the mempool, selected messages and total gas limit of selected messages + fmt.Printf("Message selection took %s\n", duration) + 
fmt.Printf("Size of the mempool: %d\n", mpoolSize) fmt.Println("selected messages: ", len(msgs)) fmt.Printf("total gas limit of selected messages: %d / %d (%0.2f%%)\n", totalGas, build.BlockGasLimit, 100*float64(totalGas)/float64(build.BlockGasLimit)) return nil diff --git a/cmd/lotus-shed/pruning.go b/cmd/lotus-shed/pruning.go index 275f3bc0a..c0bd453b1 100644 --- a/cmd/lotus-shed/pruning.go +++ b/cmd/lotus-shed/pruning.go @@ -12,7 +12,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" - badgerbs "github.com/filecoin-project/lotus/blockstore/badger" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/node/repo" @@ -144,13 +144,6 @@ var stateTreePruneCmd = &cli.Command{ } }() - // After migrating to native blockstores, this has been made - // database-specific. - badgbs, ok := bs.(*badgerbs.Blockstore) - if !ok { - return fmt.Errorf("only badger blockstores are supported") - } - mds, err := lkrepo.Datastore(context.Background(), "/metadata") if err != nil { return err @@ -160,8 +153,12 @@ var stateTreePruneCmd = &cli.Command{ const DiscardRatio = 0.2 if cctx.Bool("only-ds-gc") { fmt.Println("running datastore gc....") + gbs, ok := bs.(blockstore.BlockstoreGCOnce) + if !ok { + return xerrors.Errorf("blockstore %T does not support GC", bs) + } for i := 0; i < cctx.Int("gc-count"); i++ { - if err := badgbs.DB().RunValueLogGC(DiscardRatio); err != nil { + if err := gbs.GCOnce(ctx, blockstore.WithThreshold(DiscardRatio)); err != nil { return xerrors.Errorf("datastore GC failed: %w", err) } } @@ -208,13 +205,6 @@ var stateTreePruneCmd = &cli.Command{ return nil } - b := badgbs.DB().NewWriteBatch() - defer b.Cancel() - - markForRemoval := func(c cid.Cid) error { - return b.Delete(badgbs.StorageKey(nil, c)) - } - keys, err := bs.AllKeysChan(context.Background()) if err != nil { return xerrors.Errorf("failed to query 
blockstore: %w", err) @@ -225,12 +215,12 @@ var stateTreePruneCmd = &cli.Command{ var deleteCount int var goodHits int for k := range keys { - if goodSet.HasRaw(k.Bytes()) { + if goodSet.Has(k) { goodHits++ continue } - if err := markForRemoval(k); err != nil { + if err := bs.DeleteBlock(ctx, k); err != nil { return fmt.Errorf("failed to remove cid %s: %w", k, err) } @@ -243,13 +233,15 @@ var stateTreePruneCmd = &cli.Command{ } } - if err := b.Flush(); err != nil { - return xerrors.Errorf("failed to flush final batch delete: %w", err) + fmt.Println("running datastore gc....") + gbs, ok := bs.(blockstore.BlockstoreGCOnce) + if !ok { + fmt.Println("gc not supported...") + return nil } - fmt.Println("running datastore gc....") for i := 0; i < cctx.Int("gc-count"); i++ { - if err := badgbs.DB().RunValueLogGC(DiscardRatio); err != nil { + if err := gbs.GCOnce(ctx, blockstore.WithThreshold(DiscardRatio)); err != nil { return xerrors.Errorf("datastore GC failed: %w", err) } } diff --git a/cmd/lotus-shed/shedgen/cbor_gen.go b/cmd/lotus-shed/shedgen/cbor_gen.go index a04d52c8e..f2a79fe7d 100644 --- a/cmd/lotus-shed/shedgen/cbor_gen.go +++ b/cmd/lotus-shed/shedgen/cbor_gen.go @@ -38,7 +38,7 @@ func (t *CarbNode) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Sub"))); err != nil { return err } - if _, err := io.WriteString(w, string("Sub")); err != nil { + if _, err := cw.WriteString(string("Sub")); err != nil { return err } @@ -50,9 +50,11 @@ func (t *CarbNode) MarshalCBOR(w io.Writer) error { return err } for _, v := range t.Sub { - if err := cbg.WriteCid(w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.Sub: %w", err) + + if err := cbg.WriteCid(cw, v); err != nil { + return xerrors.Errorf("failed to write cid field v: %w", err) } + } return nil } @@ -116,12 +118,25 @@ func (t *CarbNode) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + 
var err error + _ = maj + _ = extra + _ = err - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("reading cid field t.Sub failed: %w", err) + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Sub[i]: %w", err) + } + + t.Sub[i] = c + + } } - t.Sub[i] = c } default: diff --git a/cmd/lotus-sim/simulation/stages/precommit_stage.go b/cmd/lotus-sim/simulation/stages/precommit_stage.go index 8f82d8988..1a89413d7 100644 --- a/cmd/lotus-sim/simulation/stages/precommit_stage.go +++ b/cmd/lotus-sim/simulation/stages/precommit_stage.go @@ -165,7 +165,7 @@ func (stage *PreCommitStage) packMiner( // Generate pre-commits. sealType, err := miner.PreferredSealProofTypeFromWindowPoStType( - nv, minerInfo.WindowPoStProofType, + nv, minerInfo.WindowPoStProofType, false, ) if err != nil { return 0, false, err @@ -176,7 +176,12 @@ func (stage *PreCommitStage) packMiner( return 0, false, err } - expiration := epoch + policy.GetMaxSectorExpirationExtension() + maxExtension, err := policy.GetMaxSectorExpirationExtension(nv) + if err != nil { + return 0, false, xerrors.Errorf("failed to get max extension: %w", err) + } + + expiration := epoch + maxExtension infos := make([]minertypes.PreCommitSectorParams, len(sectorNos)) for i, sno := range sectorNos { infos[i] = minertypes.PreCommitSectorParams{ diff --git a/cmd/lotus-worker/main.go b/cmd/lotus-worker/main.go index 6ad3a448e..257dac800 100644 --- a/cmd/lotus-worker/main.go +++ b/cmd/lotus-worker/main.go @@ -7,6 +7,7 @@ import ( "net" "net/http" "os" + "os/signal" "path/filepath" "reflect" "strings" @@ -348,6 +349,18 @@ var runCmd = &cli.Command{ // Connect to storage-miner ctx := lcli.ReqContext(cctx) + // Create a new context with cancel function + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Listen for interrupt signals + go func() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + <-c + cancel() + }() + var nodeApi api.StorageMiner 
var closer func() for { @@ -359,14 +372,13 @@ var runCmd = &cli.Command{ } } fmt.Printf("\r\x1b[0KConnecting to miner API... (%s)", err) - time.Sleep(time.Second) - continue + select { + case <-ctx.Done(): + return xerrors.New("Interrupted by user") + case <-time.After(time.Second): + } } - defer closer() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - // Register all metric views if err := view.Register( metrics.DefaultViews..., @@ -627,7 +639,7 @@ var runCmd = &cli.Command{ Storage: lr, } - log.Info("Setting up control endpoint at " + address) + log.Info("Setting up control endpoint at " + newAddress) timeout, err := time.ParseDuration(cctx.String("http-server-timeout")) if err != nil { @@ -652,13 +664,13 @@ var runCmd = &cli.Command{ log.Warn("Graceful shutdown successful") }() - nl, err := net.Listen("tcp", address) + nl, err := net.Listen("tcp", newAddress) if err != nil { return err } { - a, err := net.ResolveTCPAddr("tcp", address) + a, err := net.ResolveTCPAddr("tcp", newAddress) if err != nil { return xerrors.Errorf("parsing address: %w", err) } @@ -739,7 +751,7 @@ var runCmd = &cli.Command{ select { case <-readyCh: - if err := nodeApi.WorkerConnect(ctx, "http://"+address+"/rpc/v0"); err != nil { + if err := nodeApi.WorkerConnect(ctx, "http://"+newAddress+"/rpc/v0"); err != nil { log.Errorf("Registering worker failed: %+v", err) cancel() return @@ -801,15 +813,13 @@ func extractRoutableIP(timeout time.Duration) (string, error) { } minerIP, _ := maddr.ValueForProtocol(multiaddr.P_IP6) + if minerIP == "" { + minerIP, _ = maddr.ValueForProtocol(multiaddr.P_IP4) + } minerPort, _ := maddr.ValueForProtocol(multiaddr.P_TCP) - // Check if the IP is IPv6 and format the address appropriately - var addressToDial string - if ip := net.ParseIP(minerIP); ip.To4() == nil && ip.To16() != nil { - addressToDial = "[" + minerIP + "]:" + minerPort - } else { - addressToDial = minerIP + ":" + minerPort - } + // Format the address appropriately + addressToDial := 
net.JoinHostPort(minerIP, minerPort) conn, err := net.DialTimeout("tcp", addressToDial, timeout) if err != nil { diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 7271a6e53..042e89f21 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -291,10 +291,55 @@ var DaemonCmd = &cli.Command{ chainfile := cctx.String("import-chain") snapshot := cctx.String("import-snapshot") + willImportChain := false if chainfile != "" || snapshot != "" { if chainfile != "" && snapshot != "" { return fmt.Errorf("cannot specify both 'import-snapshot' and 'import-chain'") } + willImportChain = true + } + + willRemoveChain := cctx.Bool("remove-existing-chain") + if willImportChain && !willRemoveChain { + // Confirm with the user about the intention to remove chain data. + reader := bufio.NewReader(os.Stdin) + fmt.Print("Importing chain or snapshot will by default delete existing local chain data. Do you want to proceed and delete? (yes/no): ") + userInput, err := reader.ReadString('\n') + if err != nil { + return xerrors.Errorf("reading user input: %w", err) + } + userInput = strings.ToLower(strings.TrimSpace(userInput)) + + if userInput == "yes" { + willRemoveChain = true + } else if userInput == "no" { + willRemoveChain = false + } else { + return fmt.Errorf("invalid input. 
please answer with 'yes' or 'no'") + } + } + + if willRemoveChain { + lr, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return xerrors.Errorf("error opening fs repo: %w", err) + } + + exists, err := lr.Exists() + if err != nil { + return err + } + if !exists { + return xerrors.Errorf("lotus repo doesn't exist") + } + + err = removeExistingChain(cctx, lr) + if err != nil { + return err + } + } + + if willImportChain { var issnapshot bool if chainfile == "" { chainfile = snapshot diff --git a/conformance/chaos/cbor_gen.go b/conformance/chaos/cbor_gen.go index 5da16e16f..d74ae0946 100644 --- a/conformance/chaos/cbor_gen.go +++ b/conformance/chaos/cbor_gen.go @@ -44,7 +44,7 @@ func (t *State) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Value))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Value)); err != nil { + if _, err := cw.WriteString(string(t.Value)); err != nil { return err } @@ -117,13 +117,32 @@ func (t *State) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v UnmarshallableCBOR - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Unmarshallable[i] = new(UnmarshallableCBOR) + if err := t.Unmarshallable[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Unmarshallable[i] pointer: %w", err) + } + } + + } } - - t.Unmarshallable[i] = &v } return nil @@ -177,9 +196,11 @@ func (t *CallerValidationArgs) MarshalCBOR(w io.Writer) error { return err } for _, v := range t.Types { - if err := cbg.WriteCid(w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.Types: %w", err) + + if err := cbg.WriteCid(cw, v); err != nil { + return xerrors.Errorf("failed to 
write cid field v: %w", err) } + } return nil } @@ -252,13 +273,22 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v address.Address - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + if err := t.Addrs[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Addrs[i]: %w", err) + } + + } } - - t.Addrs[i] = v } // t.Types ([]cid.Cid) (slice) @@ -281,12 +311,25 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("reading cid field t.Types failed: %w", err) + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Types[i]: %w", err) + } + + t.Types[i] = c + + } } - t.Types[i] = c } return nil @@ -746,7 +789,7 @@ func (t *MutateStateArgs) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Value))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Value)); err != nil { + if _, err := cw.WriteString(string(t.Value)); err != nil { return err } @@ -857,7 +900,7 @@ func (t *AbortWithArgs) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Message)); err != nil { + if _, err := cw.WriteString(string(t.Message)); err != nil { return err } diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md index 04c037bc5..57070caed 100644 --- a/documentation/en/api-v0-methods-miner.md +++ b/documentation/en/api-v0-methods-miner.md @@ -4213,6 +4213,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + 
"10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 32212254720, + "MaxMemory": 103079215104, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 64424509440, + "MaxMemory": 128849018880, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1610612736, @@ -4305,6 +4350,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736, + 
"MaxConcurrent": 0 + }, "2": { "MinMemory": 2048, "MaxMemory": 2048, @@ -4397,6 +4487,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1073741824, @@ -4489,6 +4624,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 
1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1073741824, @@ -4581,6 +4761,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1610612736, @@ -4673,6 +4898,51 @@ Response: "BaseMinMemory": 1073741824, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 
4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 4294967296, "MaxMemory": 4294967296, @@ -4765,6 +5035,51 @@ Response: "BaseMinMemory": 0, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1048576, "MaxMemory": 1048576, @@ -4857,6 +5172,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + 
"MaxParallelismGPU": 0, + "BaseMinMemory": 1048576, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 805306368, "MaxMemory": 1073741824, @@ -4949,6 +5309,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 16106127360, + "MaxMemory": 16106127360, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 32212254720, + "MaxMemory": 32212254720, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1610612736, @@ -5041,6 +5446,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + 
"BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1073741824, @@ -5133,6 +5583,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1610612736, @@ -5225,6 +5720,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 
2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1073741824, @@ -5317,6 +5857,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1073741824, @@ -5409,6 +5994,51 @@ Response: "BaseMinMemory": 8388608, 
"MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 805306368, "MaxMemory": 1073741824, diff --git a/documentation/en/api-v0-methods-worker.md b/documentation/en/api-v0-methods-worker.md index d7d0f092e..1c2100c9c 100644 --- a/documentation/en/api-v0-methods-worker.md +++ b/documentation/en/api-v0-methods-worker.md @@ -138,6 +138,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 32212254720, + "MaxMemory": 103079215104, + "GPUUtilization": 1, + "MaxParallelism": -1, + 
"MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 64424509440, + "MaxMemory": 128849018880, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1610612736, @@ -230,6 +275,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 2048, "MaxMemory": 2048, @@ -322,6 +412,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + 
"MaxConcurrent": 0 + }, + "13": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1073741824, @@ -414,6 +549,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1073741824, @@ -506,6 +686,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + 
"MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1610612736, @@ -598,6 +823,51 @@ Response: "BaseMinMemory": 1073741824, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 4294967296, "MaxMemory": 4294967296, @@ -690,6 +960,51 @@ Response: "BaseMinMemory": 0, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0, + "MaxConcurrent": 0 + }, + "11": { 
+ "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1048576, "MaxMemory": 1048576, @@ -782,6 +1097,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 805306368, "MaxMemory": 1073741824, @@ -874,6 +1234,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 
-1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 16106127360, + "MaxMemory": 16106127360, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 32212254720, + "MaxMemory": 32212254720, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1610612736, @@ -966,6 +1371,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1073741824, @@ -1058,6 
+1508,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1610612736, @@ -1150,6 +1645,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 1, + 
"MaxParallelism": 1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1073741824, @@ -1242,6 +1782,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 1073741824, "MaxMemory": 1073741824, @@ -1334,6 +1919,51 @@ Response: "BaseMinMemory": 8388608, "MaxConcurrent": 0 }, + "10": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048, + "MaxConcurrent": 0 + }, + "11": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608, + "MaxConcurrent": 0 + }, + "12": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576, + "MaxConcurrent": 0 + }, + "13": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + 
"MaxParallelismGPU": 0, + "BaseMinMemory": 10485760, + "MaxConcurrent": 0 + }, + "14": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760, + "MaxConcurrent": 0 + }, "2": { "MinMemory": 805306368, "MaxMemory": 1073741824, diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md index 742f3de8e..9110c1c3b 100644 --- a/documentation/en/api-v0-methods.md +++ b/documentation/en/api-v0-methods.md @@ -2289,7 +2289,8 @@ Inputs: }, { "MaxFee": "0", - "MsgUuid": "07070707-0707-0707-0707-070707070707" + "MsgUuid": "07070707-0707-0707-0707-070707070707", + "MaximizeFeeCap": true }, [ { @@ -2766,7 +2767,8 @@ Inputs: ], { "MaxFee": "0", - "MsgUuid": "07070707-0707-0707-0707-070707070707" + "MsgUuid": "07070707-0707-0707-0707-070707070707", + "MaximizeFeeCap": true } ] ``` @@ -3025,7 +3027,8 @@ Inputs: }, { "MaxFee": "0", - "MsgUuid": "07070707-0707-0707-0707-070707070707" + "MsgUuid": "07070707-0707-0707-0707-070707070707", + "MaximizeFeeCap": true } ] ``` diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index e2c249395..1dae12101 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -3357,7 +3357,8 @@ Inputs: }, { "MaxFee": "0", - "MsgUuid": "07070707-0707-0707-0707-070707070707" + "MsgUuid": "07070707-0707-0707-0707-070707070707", + "MaximizeFeeCap": true }, [ { @@ -3834,7 +3835,8 @@ Inputs: ], { "MaxFee": "0", - "MsgUuid": "07070707-0707-0707-0707-070707070707" + "MsgUuid": "07070707-0707-0707-0707-070707070707", + "MaximizeFeeCap": true } ] ``` @@ -4226,7 +4228,8 @@ Inputs: }, { "MaxFee": "0", - "MsgUuid": "07070707-0707-0707-0707-070707070707" + "MsgUuid": "07070707-0707-0707-0707-070707070707", + "MaximizeFeeCap": true } ] ``` diff --git a/documentation/en/block-validation.md b/documentation/en/block-validation.md index 
d178a0667..ac711c6bf 100644 --- a/documentation/en/block-validation.md +++ b/documentation/en/block-validation.md @@ -104,9 +104,9 @@ domain separation tag. ### Winning PoSt proof Draw randomness for current epoch with `WinningPoSt` domain separation tag. -Get list of sectors challanged in this epoch for this miner, based on the randomness drawn. +Get list of sectors challenged in this epoch for this miner, based on the randomness drawn. -`V`: Use filecoin proofs system to verify that miner prooved access to sealed versions of these sectors. +`V`: Use filecoin proofs system to verify that miner proved access to sealed versions of these sectors. ## `(*StateManager).TipSetState()` diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index 8406b07cc..6e5942eb2 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -223,6 +223,7 @@ COMMANDS: propose-change-worker Propose a worker address change confirm-change-worker Confirm a worker address change compact-allocated compact allocated sectors bitfield + move-partitions move deadline of specified partitions from one to another propose-change-beneficiary Propose a beneficiary address change confirm-change-beneficiary Confirm a beneficiary address change help, h Shows a list of commands or help for one command @@ -366,12 +367,28 @@ USAGE: lotus-miner actor compact-allocated [command options] [arguments...] 
OPTIONS: - --mask-last-offset value Mask sector IDs from 0 to 'higest_allocated - offset' (default: 0) + --mask-last-offset value Mask sector IDs from 0 to 'highest_allocated - offset' (default: 0) --mask-upto-n value Mask sector IDs from 0 to 'n' (default: 0) --really-do-it Actually send transaction performing the action (default: false) --help, -h show help ``` +### lotus-miner actor move-partitions +``` +NAME: + lotus-miner actor move-partitions - move deadline of specified partitions from one to another + +USAGE: + lotus-miner actor move-partitions [command options] [arguments...] + +OPTIONS: + --partition-indices value [ --partition-indices value ] Indices of partitions to update, separated by comma + --orig-deadline value Deadline to move partition from (default: 0) + --dest-deadline value Deadline to move partition to (default: 0) + --really-do-it Actually send transaction performing the action (default: false) + --help, -h show help +``` + ### lotus-miner actor propose-change-beneficiary ``` NAME: diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index c37e40f74..1f143b896 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -195,7 +195,7 @@ [Chainstore.Splitstore] # ColdStoreType specifies the type of the coldstore. - # It can be "messages" (default) to store only messages, "universal" to store all chain state or "discard" for discarding cold blocks. + # It can be "discard" (default) for discarding cold blocks, "messages" to store only messages or "universal" to store all chain state. 
# # type: string # env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTORETYPE diff --git a/documentation/en/default-lotus-miner-config.toml b/documentation/en/default-lotus-miner-config.toml index 5400c693d..7b1d0e2e9 100644 --- a/documentation/en/default-lotus-miner-config.toml +++ b/documentation/en/default-lotus-miner-config.toml @@ -435,13 +435,10 @@ # env var: LOTUS_PROVING_DISABLEWDPOSTPRECHECKS #DisableWDPoStPreChecks = false - # Maximum number of partitions to prove in a single SubmitWindowPoSt messace. 0 = network limit (10 in nv16) + # Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (3 in nv21) # # A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors. - # - # The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which - # means that a single message can prove at most 10 partitions - # + # # Note that setting this value lower may result in less efficient gas use - more messages will be sent, # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit) # @@ -531,7 +528,7 @@ # CommittedCapacitySectorLifetime is the duration a Committed Capacity (CC) sector will # live before it must be extended or converted into sector containing deals before it is - # terminated. Value must be between 180-540 days inclusive + # terminated. Value must be between 180-1278 days (1278 in nv21, 540 before nv21). # # type: Duration # env var: LOTUS_SEALING_COMMITTEDCAPACITYSECTORLIFETIME @@ -674,6 +671,12 @@ # env var: LOTUS_SEALING_TERMINATEBATCHWAIT #TerminateBatchWait = "5m0s" + # UseSyntheticPoRep, when set to true, will reduce the amount of cache data held on disk after the completion of PreCommit 2 to 11GiB. 
+ # + # type: bool + # env var: LOTUS_SEALING_USESYNTHETICPOREP + #UseSyntheticPoRep = false + [Storage] # type: int @@ -783,6 +786,10 @@ # env var: LOTUS_FEES_MAXMARKETBALANCEADDFEE #MaxMarketBalanceAddFee = "0.007 FIL" + # type: bool + # env var: LOTUS_FEES_MAXIMIZEWINDOWPOSTFEECAP + #MaximizeWindowPoStFeeCap = true + [Fees.MaxPreCommitBatchGasFee] # type: types.FIL # env var: LOTUS_FEES_MAXPRECOMMITBATCHGASFEE_BASE diff --git a/documentation/en/default-lotus-provider-config.toml b/documentation/en/default-lotus-provider-config.toml index 9d420ff37..cb42c7f5f 100644 --- a/documentation/en/default-lotus-provider-config.toml +++ b/documentation/en/default-lotus-provider-config.toml @@ -155,13 +155,10 @@ # type: bool #DisableWDPoStPreChecks = false - # Maximum number of partitions to prove in a single SubmitWindowPoSt messace. 0 = network limit (10 in nv16) + # Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (3 in nv21) # # A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors. - # - # The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which - # means that a single message can prove at most 10 partitions - # + # # Note that setting this value lower may result in less efficient gas use - more messages will be sent, # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit) # diff --git a/documentation/misc/RELEASE_ISSUE_TEMPLATE.md b/documentation/misc/RELEASE_ISSUE_TEMPLATE.md index 116c615d3..42f44b955 100644 --- a/documentation/misc/RELEASE_ISSUE_TEMPLATE.md +++ b/documentation/misc/RELEASE_ISSUE_TEMPLATE.md @@ -14,11 +14,13 @@ **Note for whoever is owning the release:** please capture notes as comments in this issue for anything you noticed that could be improved for future releases. 
There is a *Post Release* step below for incorporating changes back into the [RELEASE_ISSUE_TEMPLATE](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md), and this is easier done by collecting notes from along the way rather than just thinking about it at the end. First steps: - - [ ] FFI: Fork a new branch (`release/lotus-vX.Y.Z`) from the filecoin-ffi `master` branch - - [ ] FFI: Tag the head of `release/lotus-vX.Y.Z` as `vX.Y.Z-pre1` + - [ ] Set shell variables vX.Y.Z `export X=1 && export Y=24 && export Z=3` + - [ ] FFI: Fork a new branch (`release/lotus-vX.Y.Z`) from the filecoin-ffi `master` branch: `git checkout master && git checkout -b release/lotus-v$X.$Y.$Z` + - [ ] FFI: Tag the head of `release/lotus-vX.Y.Z` as `vX.Y.Z-pre1`: `git tag -a v$X.$Y.$Z-pre1 -m"release"` - [ ] Open and land a PR in lotus `master` that updates the FFI dependency to `vX.Y.Z-pre1` as cut in the previous step - [ ] Fork a new branch (`release/vX.Y.Z`) from `master` and make any further release related changes to this branch. If any "non-trivial" changes get added to the release, uncheck all the checkboxes and return to this stage. - - [ ] Bump the version in `build/version.go` in the `master` branch to `vX.Y.(Z+1)-dev` (bump from feature release) or `vX.(Y+1).0-dev` (bump from mandatory release). Run make gen and make docsgen-cli before committing changes + - [ ] Bump the version in `build/version.go` in the `master` branch to `vX.Y.(Z+1)-dev` (bump from feature release) or `vX.(Y+1).0-dev` (bump from mandatory release). 
+ - [ ] Run `make gen && make docsgen-cli` before committing changes **Prepping an RC**: @@ -26,6 +28,7 @@ First steps: - [ ] run `make gen && make docsgen-cli` - [ ] Generate changelog using the script at scripts/mkreleaselog - [ ] Add contents of generated text to lotus/CHANGELOG.md in addition to other details +- [ ] Commit using PR - [ ] tag commit with `vX.Y.Z-rcN` - [ ] cut a pre-release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true) diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index bf5edd551..441fa8e61 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit bf5edd551d23901fa565aac4ce94433afe0c278e +Subproject commit 441fa8e61189dc32c2960c1f8d8ba56269f20366 diff --git a/go.mod b/go.mod index 9d3fcc91d..50946b6cd 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,7 @@ require ( github.com/filecoin-project/go-jsonrpc v0.3.1 github.com/filecoin-project/go-padreader v0.0.1 github.com/filecoin-project/go-paramfetch v0.0.4 - github.com/filecoin-project/go-state-types v0.12.1 + github.com/filecoin-project/go-state-types v0.12.5 github.com/filecoin-project/go-statemachine v1.0.3 github.com/filecoin-project/go-statestore v0.2.0 github.com/filecoin-project/go-storedcounter v0.1.0 @@ -107,11 +107,12 @@ require ( github.com/ipld/go-ipld-selector-text-lite v0.0.1 github.com/ipni/go-libipni v0.0.8 github.com/ipni/index-provider v0.12.0 + github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa github.com/jackc/pgx/v5 v5.4.1 github.com/kelseyhightower/envconfig v1.4.0 github.com/koalacxr/quantile v0.0.1 github.com/libp2p/go-buffer-pool v0.1.0 - github.com/libp2p/go-libp2p v0.30.0 + github.com/libp2p/go-libp2p v0.31.0 github.com/libp2p/go-libp2p-consensus v0.0.1 github.com/libp2p/go-libp2p-gorpc v0.5.0 github.com/libp2p/go-libp2p-kad-dht v0.24.0 @@ -145,7 +146,7 @@ require ( github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/urfave/cli/v2 v2.25.5 
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba - github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa + github.com/whyrusleeping/cbor-gen v0.0.0-20230923211252-36a87e1ba72f github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 github.com/xeipuuv/gojsonschema v1.2.0 @@ -261,7 +262,6 @@ require ( github.com/ipfs/go-verifcid v0.0.2 // indirect github.com/ipld/go-ipld-adl-hamt v0.0.0-20220616142416-9004dbd839e0 // indirect github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 // indirect - github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/puddle/v2 v2.2.0 // indirect @@ -319,7 +319,7 @@ require ( github.com/prometheus/statsd_exporter v0.22.7 // indirect github.com/quic-go/qpack v0.4.0 // indirect github.com/quic-go/qtls-go1-20 v0.3.3 // indirect - github.com/quic-go/quic-go v0.37.6 // indirect + github.com/quic-go/quic-go v0.38.1 // indirect github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/rivo/uniseg v0.1.0 // indirect github.com/rs/cors v1.7.0 // indirect diff --git a/go.sum b/go.sum index 9fff24eed..aee413de4 100644 --- a/go.sum +++ b/go.sum @@ -345,8 +345,9 @@ github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psS github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.11.1/go.mod h1:SyNPwTsU7I22gL2r0OAPcImvLoTVfgRwdK/Y5rR1zz8= -github.com/filecoin-project/go-state-types v0.12.1 h1:/1ip/jXIP4QzWd3hlaQ7RGp1DHKKYG3+NOhd/r08UJY= -github.com/filecoin-project/go-state-types v0.12.1/go.mod 
h1:KOBGyvCalT8uHBS7KSKOVbjsilD90bBZHgLAqrzz6gU= +github.com/filecoin-project/go-state-types v0.11.2-0.20230712101859-8f37624fa540/go.mod h1:SyNPwTsU7I22gL2r0OAPcImvLoTVfgRwdK/Y5rR1zz8= +github.com/filecoin-project/go-state-types v0.12.5 h1:VQ2N2T3JeUDdIHEo/xhjnT7Q218Wl0UYIyglqT7Z9Ck= +github.com/filecoin-project/go-state-types v0.12.5/go.mod h1:iJTqGdWDvzXhuVf64Lw0hzt4TIoitMo0VgHdxdjNDZI= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statemachine v1.0.3 h1:N07o6alys+V1tNoSTi4WuuoeNC4erS/6jE74+NsgQuk= github.com/filecoin-project/go-statemachine v1.0.3/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= @@ -1015,8 +1016,8 @@ github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xS github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= -github.com/libp2p/go-libp2p v0.30.0 h1:9EZwFtJPFBcs/yJTnP90TpN1hgrT/EsFfM+OZuwV87U= -github.com/libp2p/go-libp2p v0.30.0/go.mod h1:nr2g5V7lfftwgiJ78/HrID+pwvayLyqKCEirT2Y3Byg= +github.com/libp2p/go-libp2p v0.31.0 h1:LFShhP8F6xthWiBBq3euxbKjZsoRajVEyBS9snfHxYg= +github.com/libp2p/go-libp2p v0.31.0/go.mod h1:W/FEK1c/t04PbRH3fA9i5oucu5YcgrG0JVoBWT1B7Eg= github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= @@ -1510,8 +1511,8 @@ github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= github.com/quic-go/qtls-go1-20 v0.3.3 
h1:17/glZSLI9P9fDAeyCHBFSWSqJcwx1byhLwP5eUIDCM= github.com/quic-go/qtls-go1-20 v0.3.3/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= -github.com/quic-go/quic-go v0.37.6 h1:2IIUmQzT5YNxAiaPGjs++Z4hGOtIR0q79uS5qE9ccfY= -github.com/quic-go/quic-go v0.37.6/go.mod h1:YsbH1r4mSHPJcLF4k4zruUkLBqctEMBDR6VPvcYjIsU= +github.com/quic-go/quic-go v0.38.1 h1:M36YWA5dEhEeT+slOu/SwMEucbYd0YFidxG3KlGPZaE= +github.com/quic-go/quic-go v0.38.1/go.mod h1:ijnZM7JsFIkp4cRyjxJNIzdSfCLmUMg9wdyhGmg+SN4= github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= @@ -1699,8 +1700,8 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:f github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= -github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa h1:EyA027ZAkuaCLoxVX4r1TZMPy1d31fM6hbfQ4OU4I5o= -github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20230923211252-36a87e1ba72f h1:SBuSxXJL0/ZJMtTxbXZgHZkThl9dNrzyaNhlyaqscRo= +github.com/whyrusleeping/cbor-gen v0.0.0-20230923211252-36a87e1ba72f/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace 
v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -2331,6 +2332,7 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= diff --git a/itests/api_test.go b/itests/api_test.go index c87012cfe..ff43bd5c0 100644 --- a/itests/api_test.go +++ b/itests/api_test.go @@ -86,7 +86,7 @@ func (ts *apiSuite) testID(t *testing.T) { if err != nil { t.Fatal(err) } - require.Regexp(t, "^12", id.Pretty()) + require.Regexp(t, "^12", id.String()) } func (ts *apiSuite) testConnectTwo(t *testing.T) { diff --git a/itests/eth_filter_test.go b/itests/eth_filter_test.go index 62ab5db54..9212e60fc 100644 --- a/itests/eth_filter_test.go +++ b/itests/eth_filter_test.go @@ -16,6 +16,7 @@ import ( "time" "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" "github.com/stretchr/testify/require" "golang.org/x/xerrors" @@ -543,6 +544,7 @@ func TestTxReceiptBloom(t *testing.T) { kit.MockProofs(), kit.ThroughRPC()) ens.InterconnectAll().BeginMining(blockTime) + logging.SetLogLevel("fullnode", "DEBUG") ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() @@ -577,6 +579,10 @@ func TestTxReceiptBloom(t *testing.T) { } } + // Deflake plan: (Flake: 5 bits instead of 6) + // Debug + search logs for "LogsBloom" + // compare to passing case. 
+ // // 3 bits from the topic, 3 bits from the address require.Equal(t, 6, bitsSet) } diff --git a/itests/eth_transactions_test.go b/itests/eth_transactions_test.go index b39632795..6d60f6786 100644 --- a/itests/eth_transactions_test.go +++ b/itests/eth_transactions_test.go @@ -81,12 +81,16 @@ func TestValueTransferValidSignature(t *testing.T) { receipt, err := waitForEthTxReceipt(ctx, client, hash) require.NoError(t, err) require.NotNil(t, receipt) + require.EqualValues(t, ethAddr, receipt.From) + require.EqualValues(t, ethAddr2, *receipt.To) + require.EqualValues(t, hash, receipt.TransactionHash) // Success. require.EqualValues(t, ethtypes.EthUint64(0x1), receipt.Status) + // Validate that we sent the expected transaction. ethTx, err := client.EthGetTransactionByHash(ctx, &hash) - require.Nil(t, err) + require.NoError(t, err) require.EqualValues(t, ethAddr, ethTx.From) require.EqualValues(t, ethAddr2, *ethTx.To) require.EqualValues(t, tx.ChainID, ethTx.ChainID) @@ -269,6 +273,17 @@ func TestContractInvocation(t *testing.T) { // Success. require.EqualValues(t, ethtypes.EthUint64(0x1), receipt.Status) + + // Validate that we correctly computed the gas outputs. 
+ mCid, err := client.EthGetMessageCidByTransactionHash(ctx, &hash) + require.NoError(t, err) + require.NotNil(t, mCid) + + invokResult, err := client.StateReplay(ctx, types.EmptyTSK, *mCid) + require.NoError(t, err) + require.EqualValues(t, invokResult.GasCost.GasUsed, big.NewInt(int64(receipt.GasUsed))) + effectiveGasPrice := big.Div(invokResult.GasCost.TotalCost, invokResult.GasCost.GasUsed) + require.EqualValues(t, effectiveGasPrice, big.Int(receipt.EffectiveGasPrice)) } func TestGetBlockByNumber(t *testing.T) { diff --git a/itests/fevm_test.go b/itests/fevm_test.go index 1512c3277..cb69c036c 100644 --- a/itests/fevm_test.go +++ b/itests/fevm_test.go @@ -618,9 +618,9 @@ func TestFEVMRecursiveActorCall(t *testing.T) { t.Run("n=251,r=32", testN(251, 32, exitcode.Ok)) t.Run("n=0,r=252", testN(0, 252, exitcode.Ok)) - t.Run("n=251,r=166", testN(251, 166, exitcode.Ok)) + t.Run("n=251,r=164", testN(251, 164, exitcode.Ok)) - t.Run("n=0,r=253-fails", testN(0, 254, exitcode.ExitCode(33))) // 33 means transaction reverted + t.Run("n=0,r=255-fails", testN(0, 255, exitcode.ExitCode(33))) // 33 means transaction reverted t.Run("n=251,r=167-fails", testN(251, 167, exitcode.ExitCode(33))) } diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go index 1b5fef501..daca6d2d4 100644 --- a/itests/kit/ensemble.go +++ b/itests/kit/ensemble.go @@ -262,7 +262,7 @@ func (n *Ensemble) MinerEnroll(minerNode *TestMiner, full *TestFullNode, opts .. ) // Will use 2KiB sectors by default (default value of sectorSize). - proofType, err := miner.SealProofTypeFromSectorSize(options.sectorSize, n.genesis.version) + proofType, err := miner.SealProofTypeFromSectorSize(options.sectorSize, n.genesis.version, false) require.NoError(n.t, err) // Create the preseal commitment. 
diff --git a/itests/migration_test.go b/itests/migration_test.go index 4082792ce..68991a579 100644 --- a/itests/migration_test.go +++ b/itests/migration_test.go @@ -34,6 +34,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/datacap" "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/system" "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/state" @@ -301,7 +302,7 @@ func TestMigrationNV17(t *testing.T) { minerInfo, err := testClient.StateMinerInfo(ctx, testMiner.ActorAddr, types.EmptyTSK) require.NoError(t, err) - spt, err := miner.SealProofTypeFromSectorSize(minerInfo.SectorSize, network.Version17) + spt, err := miner.SealProofTypeFromSectorSize(minerInfo.SectorSize, network.Version17, false) require.NoError(t, err) preCommitParams := miner9.PreCommitSectorParams{ @@ -762,3 +763,68 @@ waitForProof20: require.Equal(t, v1proof, minerInfo.WindowPoStProofType) } + +func TestMigrationNV21(t *testing.T) { + kit.QuietMiningLogs() + + nv21epoch := abi.ChainEpoch(100) + testClient, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), + kit.UpgradeSchedule(stmgr.Upgrade{ + Network: network.Version20, + Height: -1, + }, stmgr.Upgrade{ + Network: network.Version21, + Height: nv21epoch, + Migration: filcns.UpgradeActorsV12, + }, + )) + + ens.InterconnectAll().BeginMining(10 * time.Millisecond) + + clientApi := testClient.FullNode.(*impl.FullNodeAPI) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testClient.WaitTillChain(ctx, kit.HeightAtLeast(nv21epoch+5)) + + // Now that we have upgraded, we need to verify: + // - Sector info changes executed successfully + // - Direct data onboarding correct + + bs := blockstore.NewAPIBlockstore(testClient) + ctxStore := 
gstStore.WrapBlockStore(ctx, bs) + + currTs, err := clientApi.ChainHead(ctx) + require.NoError(t, err) + + newStateTree, err := state.LoadStateTree(ctxStore, currTs.Blocks()[0].ParentStateRoot) + require.NoError(t, err) + + require.Equal(t, types.StateTreeVersion5, newStateTree.Version()) + + // check the system actor + systemAct, err := newStateTree.GetActor(builtin.SystemActorAddr) + require.NoError(t, err) + + systemCode, ok := actors.GetActorCodeID(actorstypes.Version12, manifest.SystemKey) + require.True(t, ok) + + require.Equal(t, systemCode, systemAct.Code) + + systemSt, err := system.Load(ctxStore, systemAct) + require.NoError(t, err) + + manifest12Cid, ok := actors.GetManifest(actorstypes.Version12) + require.True(t, ok) + + manifest12, err := actors.LoadManifest(ctx, manifest12Cid, ctxStore) + require.NoError(t, err) + require.Equal(t, manifest12.Data, systemSt.GetBuiltinActors()) + + // start post migration checks + + //todo @aayush sector info changes + + //todo @zen Direct data onboarding tests + +} diff --git a/itests/sector_import_full_test.go b/itests/sector_import_full_test.go index e4ec5e141..c9bd96afd 100644 --- a/itests/sector_import_full_test.go +++ b/itests/sector_import_full_test.go @@ -88,7 +88,7 @@ func TestSectorImport(t *testing.T) { require.NoError(t, err) ver, err := client.StateNetworkVersion(ctx, types.EmptyTSK) require.NoError(t, err) - spt, err := lminer.PreferredSealProofTypeFromWindowPoStType(ver, mi.WindowPoStProofType) + spt, err := lminer.PreferredSealProofTypeFromWindowPoStType(ver, mi.WindowPoStProofType, false) require.NoError(t, err) ssize, err := spt.SectorSize() diff --git a/itests/sector_import_simple_test.go b/itests/sector_import_simple_test.go index f34438694..fb1a77a14 100644 --- a/itests/sector_import_simple_test.go +++ b/itests/sector_import_simple_test.go @@ -63,7 +63,7 @@ func TestSectorImportAfterPC2(t *testing.T) { require.NoError(t, err) ver, err := client.StateNetworkVersion(ctx, types.EmptyTSK) 
require.NoError(t, err) - spt, err := lminer.PreferredSealProofTypeFromWindowPoStType(ver, mi.WindowPoStProofType) + spt, err := lminer.PreferredSealProofTypeFromWindowPoStType(ver, mi.WindowPoStProofType, false) require.NoError(t, err) ssize, err := spt.SectorSize() diff --git a/itests/sector_pledge_test.go b/itests/sector_pledge_test.go index a2e74ef72..1e045c79d 100644 --- a/itests/sector_pledge_test.go +++ b/itests/sector_pledge_test.go @@ -18,6 +18,7 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/impl" sealing "github.com/filecoin-project/lotus/storage/pipeline" ) @@ -195,3 +196,30 @@ func TestPledgeMaxBatching(t *testing.T) { t.Run("Force max prove commit aggregate size", runTest) } + +func TestPledgeSynth(t *testing.T) { + kit.QuietMiningLogs() + + blockTime := 50 * time.Millisecond + + runTest := func(t *testing.T, nSectors int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, miner, ens := kit.EnsembleMinimal(t, kit.MutateSealingConfig(func(sc *config.SealingConfig) { + sc.UseSyntheticPoRep = true + })) // no mock proofs + + ens.InterconnectAll().BeginMiningMustPost(blockTime) + + miner.PledgeSectors(ctx, nSectors, 0, nil) + } + + t.Run("1", func(t *testing.T) { + runTest(t, 1) + }) + + t.Run("3", func(t *testing.T) { + runTest(t, 3) + }) +} diff --git a/lib/consensus/raft/consensus.go b/lib/consensus/raft/consensus.go index 60d9dc305..d74f200fa 100644 --- a/lib/consensus/raft/consensus.go +++ b/lib/consensus/raft/consensus.go @@ -342,7 +342,7 @@ func (cc *Consensus) RedirectToLeader(method string, arg interface{}, ret interf return false, nil } - logger.Debugf("redirecting %s to leader: %s", method, leader.Pretty()) + logger.Debugf("redirecting %s to leader: %s", method, leader) finalErr = cc.RpcClient.CallContext( ctx, leader, @@ 
-394,7 +394,7 @@ func (cc *Consensus) Commit(ctx context.Context, op *ConsensusOp) error { func (cc *Consensus) AddPeer(ctx context.Context, pid peer.ID) error { var finalErr error for i := 0; i <= cc.config.CommitRetries; i++ { - logger.Debugf("attempt #%d: AddPeer %s", i, pid.Pretty()) + logger.Debugf("attempt #%d: AddPeer %s", i, pid) if finalErr != nil { logger.Errorf("retrying to add peer. Attempt #%d failed: %s", i, finalErr) } @@ -408,7 +408,7 @@ func (cc *Consensus) AddPeer(ctx context.Context, pid peer.ID) error { time.Sleep(cc.config.CommitRetryDelay) continue } - logger.Infof("peer added to Raft: %s", pid.Pretty()) + logger.Infof("peer added to Raft: %s", pid) break } return finalErr @@ -419,7 +419,7 @@ func (cc *Consensus) AddPeer(ctx context.Context, pid peer.ID) error { func (cc *Consensus) RmPeer(ctx context.Context, pid peer.ID) error { var finalErr error for i := 0; i <= cc.config.CommitRetries; i++ { - logger.Debugf("attempt #%d: RmPeer %s", i, pid.Pretty()) + logger.Debugf("attempt #%d: RmPeer %s", i, pid) if finalErr != nil { logger.Errorf("retrying to remove peer. Attempt #%d failed: %s", i, finalErr) } @@ -433,7 +433,7 @@ func (cc *Consensus) RmPeer(ctx context.Context, pid peer.ID) error { time.Sleep(cc.config.CommitRetryDelay) continue } - logger.Infof("peer removed from Raft: %s", pid.Pretty()) + logger.Infof("peer removed from Raft: %s", pid) break } return finalErr diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go index a5022613b..bdfce6f55 100644 --- a/markets/storageadapter/provider.go +++ b/markets/storageadapter/provider.go @@ -186,7 +186,10 @@ func (n *ProviderNodeAdapter) GetProofType(ctx context.Context, maddr address.Ad return 0, err } - return miner.PreferredSealProofTypeFromWindowPoStType(nver, mi.WindowPoStProofType) + // false because this variance is not consumed. 
+ const configWantSynthetic = false + + return miner.PreferredSealProofTypeFromWindowPoStType(nver, mi.WindowPoStProofType, configWantSynthetic) } func (n *ProviderNodeAdapter) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) { diff --git a/metrics/README.md b/metrics/README.md new file mode 100644 index 000000000..dce18e308 --- /dev/null +++ b/metrics/README.md @@ -0,0 +1,144 @@ +# Setting Up Prometheus and Grafana + +Lotus supports exporting a wide range of metrics, enabling users to gain insights into its behavior and effectively analyze performance issues. These metrics can be conveniently utilized with aggregation and visualization tools for in-depth analysis. In this document, we show how you can set up Prometheus and Grafana for monitoring and visualizing these metrics: + +- **Prometheus**: Prometheus is an open-source monitoring and alerting toolkit designed for collecting and storing time-series data from various systems and applications. It provides a robust querying language (PromQL) and a web-based interface for analyzing and visualizing metrics. + +- **Grafana**: Grafana is an open-source platform for creating, sharing, and visualizing interactive dashboards and graphs. It integrates with various data sources, including Prometheus, to help users create meaningful visual representations of their data and set up alerting based on specific conditions. + +## Prerequisites + +- You have a Linux or Mac based system. +- You have root access to install software +- You have lotus node already running + +**Note:** These instructions have been tested on Ubuntu 23.04 and on Mac M1. 
+ +## Install and start Prometheus + +### On Ubuntu: + +``` +# install prometheus +sudo apt-get install prometheus + +# copy the prometheus.yml config to the correct directory +sudo cp metrics/prometheus.yml /etc/prometheus/prometheus.yml + +# start prometheus +sudo systemctl start prometheus + +# enable prometheus on boot (optional) +sudo systemctl enable prometheus +``` + +### On Mac: + +``` +# install prometheus +brew install prometheus + +# start prometheus +prometheus --config.file=lotus/metrics/prometheus.yml +``` + +## Install and start Grafana + +### On Ubuntu: + +``` +# download the Grafana GPG key in our keyring +wget -q -O - https://packages.grafana.com/gpg.key | gpg --dearmor | sudo tee /usr/share/keyrings/grafana.gpg > /dev/null + +# add the Grafana repository to our APT sources +echo "deb [signed-by=/usr/share/keyrings/grafana.gpg] https://packages.grafana.com/oss/deb stable main" | sudo tee -a /etc/apt/sources.list.d/grafana.list + +# update our APT cache +sudo apt-get update + +# now we can install grafana +sudo apt-get install grafana + +# start grafana +sudo systemctl start grafana-server + +# start grafana on boot (optional) +sudo systemctl enable grafana-server +``` + +### On Mac: + +``` +brew install grafana +brew services start grafana +``` + +You should now have Prometheus and Grafana running on your machine where Prometheus is already collecting metrics from your Lotus node (if its running) and saving it to a database. + +You can confirm everything is setup correctly by visiting: +- Prometheus (http://localhost:9090): You can open the metric explorer and view any of the aggregated metrics scraped from Lotus +- Grafana (http://localhost:3000): Default username/password is admin/admin, remember to change it after login. + +## Add Prometheus as datasource in Grafana + +1. Log in to Grafana using the web interface. +2. Navigate to "Home" > "Connections" > "Data Sources." +3. Click "Add data source." +4. Choose "Prometheus." +5. 
In the "HTTP" section, set the URL to http://localhost:9090. +6. Click "Save & Test" to verify the connection. + +## Import one of the existing dashboards in lotus/metrics/grafana + +1. Log in to Grafana using the web interface. +2. Navigate to "Home" > "Dashboards" > Click the drop down menu in the "New" button and select "Import" +3. Paste any of the existing dashboards in lotus/metrics/grafana into the "Import via panel json" panel. +4. Click "Load" +5. Select the Prometheus datasource you created earlier +6. Click "Import" + +# Collect system metrics using node_exporter + +Although Lotus includes many useful metrics it does not include system metrics, such as information about cpu, memory, disk, network, etc. If you are investigating an issue and have Lotus metrics available, its often very useful to correlate certain events or behaviour with general system metrics. + +## Install node_exporter +If you have followed this guide so far and have Prometheus and Grafana already running, you can run the following commands to also aggregate the system metrics: + +Ubuntu: + +``` + +# download the newest release by https://github.com/prometheus/node_exporter/releases (it was 1.6.1 as of writing this doc) +wget https://github.com/prometheus/node_exporter/releases/download/v1.6.1/node_exporter-1.6.1.linux-amd64.tar.gz + +# extract the release (it contains a single binary plus some docs) +tar -xf node_exporter-1.6.1.linux-amd64.tar.gz + +# move it to /usr/local/bin +sudo mv node_exporter-1.6.1.linux-amd64/node_exporter /usr/local/bin + +# run node_exporter +node_exporter +``` + +Mac: + +``` +# install node_exporter +brew install node_exporter + +# run node_exporter +node_exporter +``` + +## Import system dashboard + +Since our `prometheus.yml` config already has configuration for node_exporter, we can go straight away and import a Grafana dashboard for viewing: + +1. Download the most recent dashboard from https://grafana.com/grafana/dashboards/1860-node-exporter-full/ +2. 
Log in to Grafana (http://localhost:3000) using the web interface. +3. Navigate to "Home" > "Dashboards" > Click the drop down menu in the "New" button and select "Import" +4. Paste any of the existing dashboards in lotus/metrics/grafana into the "Import via panel json" panel. +5. Click "Load" +6. Select the Prometheus datasource you created earlier +7. Click "Import" diff --git a/metrics/grafana/MessageExecution.json b/metrics/grafana/MessageExecution.json new file mode 100644 index 000000000..1bdee4e0a --- /dev/null +++ b/metrics/grafana/MessageExecution.json @@ -0,0 +1,241 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.1.1" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Understand where time is spent in ApplyBlocks which is executed as part of ExecuteTipSet, its metric include:\n\n- applyblocks_total_ms (total): The total time spent in Applyblocks\n- applyblocks_cron (cron): Time spent in cron\n- applyblocks_early (early): Time spent in early apply-blocks (null cron, upgrades)\n- applyblocks_flush (flush): Time spent flushing vm state\n- applyblocks_messages (apply 
messages): Time spent applying block messages\n", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Time in MS", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": 60000, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "histogram_quantile(0.99, sum by(le) (rate(lotus_vm_applyblocks_total_ms_bucket[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Total", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "histogram_quantile(0.99, sum by(le) (rate(lotus_vm_applyblocks_cron_bucket[$__rate_interval])))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Cron", + "range": true, + "refId": "B", + "useBackend": 
false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "histogram_quantile(0.99, sum by(le) (rate(lotus_vm_applyblocks_early_bucket[$__rate_interval])))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Early", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "histogram_quantile(0.99, sum by(le) (rate(lotus_vm_applyblocks_flush_bucket[$__rate_interval])))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Flush", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "histogram_quantile(0.99, sum by(le) (rate(lotus_vm_applyblocks_messages_bucket[$__rate_interval])))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Apply messages", + "range": true, + "refId": "E", + "useBackend": false + } + ], + "title": "ApplyBlocks (ms)", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Lotus Message Execution", + "uid": "a7bacd0e-f7a1-418f-98e5-3469c5e0b6ea", + "version": 5, + "weekStart": "" +} \ No newline at end of file diff --git a/metrics/metrics.go b/metrics/metrics.go index 50b47ad69..0ea2c841d 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -106,6 +106,7 @@ var ( MpoolAddTsDuration = stats.Float64("mpool/addts_ms", "Duration of addTs in mpool", stats.UnitMilliseconds) 
MpoolAddDuration = stats.Float64("mpool/add_ms", "Duration of Add in mpool", stats.UnitMilliseconds) MpoolPushDuration = stats.Float64("mpool/push_ms", "Duration of Push in mpool", stats.UnitMilliseconds) + MpoolMessageCount = stats.Int64("mpool/message_count", "Number of messages in the mpool", stats.UnitDimensionless) BlockPublished = stats.Int64("block/published", "Counter for total locally published blocks", stats.UnitDimensionless) BlockReceived = stats.Int64("block/received", "Counter for total received blocks", stats.UnitDimensionless) BlockValidationFailure = stats.Int64("block/failure", "Counter for block validation failures", stats.UnitDimensionless) @@ -307,6 +308,10 @@ var ( Measure: MpoolPushDuration, Aggregation: defaultMillisecondsDistribution, } + MpoolMessageCountView = &view.View{ + Measure: MpoolMessageCount, + Aggregation: view.LastValue(), + } PeerCountView = &view.View{ Measure: PeerCount, Aggregation: view.LastValue(), @@ -770,6 +775,7 @@ var ChainNodeViews = append([]*view.View{ MpoolAddTsDurationView, MpoolAddDurationView, MpoolPushDurationView, + MpoolMessageCountView, PubsubPublishMessageView, PubsubDeliverMessageView, PubsubRejectMessageView, diff --git a/metrics/prometheus.yml b/metrics/prometheus.yml new file mode 100644 index 000000000..6d1564ab9 --- /dev/null +++ b/metrics/prometheus.yml @@ -0,0 +1,14 @@ +global: + scrape_interval: 1m + +scrape_configs: + - job_name: lotus + scrape_interval: 10s + metrics_path: '/debug/metrics' + static_configs: + - targets: ['localhost:1234'] + + - job_name: node_exporter + scrape_interval: 15s + static_configs: + - targets: ['localhost:9100'] diff --git a/miner/miner.go b/miner/miner.go index d1dee1ec9..54f4263ef 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -594,7 +594,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type } } - if len(refreshedBaseBlocks) != len(base.TipSet.Blocks()) { + if len(refreshedBaseBlocks) != 0 && len(refreshedBaseBlocks) != 
len(base.TipSet.Blocks()) { refreshedBase, err := types.NewTipSet(refreshedBaseBlocks) if err != nil { err = xerrors.Errorf("failed to create new tipset when refreshing: %w", err) diff --git a/node/builder.go b/node/builder.go index e0f4dfe3a..efc5fc459 100644 --- a/node/builder.go +++ b/node/builder.go @@ -90,6 +90,7 @@ const ( // health checks CheckFDLimit CheckFvmConcurrency + CheckUDPBufferSize LegacyMarketsEOL // libp2p @@ -169,6 +170,7 @@ func defaults() []Option { Override(CheckFDLimit, modules.CheckFdLimit(build.DefaultFDLimit)), Override(CheckFvmConcurrency, modules.CheckFvmConcurrency()), + Override(CheckUDPBufferSize, modules.CheckUDPBufferSize(2048*1024)), Override(new(system.MemoryConstraints), modules.MemoryConstraints), Override(InitMemoryWatchdog, modules.MemoryWatchdog), diff --git a/node/bundle/bundle.go b/node/bundle/bundle.go index a55cad9f1..716c9043b 100644 --- a/node/bundle/bundle.go +++ b/node/bundle/bundle.go @@ -69,7 +69,7 @@ func LoadBundles(ctx context.Context, bs blockstore.Blockstore, versions ...acto ) if path, ok := build.BundleOverrides[av]; ok { root, err = LoadBundleFromFile(ctx, bs, path) - } else if embedded, ok := build.GetEmbeddedBuiltinActorsBundle(av); ok { + } else if embedded, ok := build.GetEmbeddedBuiltinActorsBundle(av, build.NetworkBundle); ok { root, err = LoadBundle(ctx, bs, bytes.NewReader(embedded)) } else { err = xerrors.Errorf("bundle for actors version v%d not found", av) diff --git a/node/config/def.go b/node/config/def.go index e4447e3c6..746c963fd 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -10,6 +10,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -122,6 +123,8 @@ func DefaultFullNode() *FullNode { } func DefaultStorageMiner() *StorageMiner { + 
// TODO: Should we increase this to nv21, which would push it to 3.5 years? + maxSectorExtentsion, _ := policy.GetMaxSectorExpirationExtension(network.Version20) cfg := &StorageMiner{ Common: defCommon(), @@ -143,7 +146,7 @@ func DefaultStorageMiner() *StorageMiner { // XXX snap deals wait deals slack if first PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration - CommittedCapacitySectorLifetime: Duration(builtin.EpochDurationSeconds * uint64(policy.GetMaxSectorExpirationExtension()) * uint64(time.Second)), + CommittedCapacitySectorLifetime: Duration(builtin.EpochDurationSeconds * uint64(maxSectorExtentsion) * uint64(time.Second)), AggregateCommits: true, MinCommitBatch: miner5.MinAggregatedSectors, // per FIP13, we must have at least four proofs to aggregate, where 4 is the cross over point where aggregation wins out on single provecommit gas costs @@ -158,6 +161,7 @@ func DefaultStorageMiner() *StorageMiner { TerminateBatchMax: 100, TerminateBatchWait: Duration(5 * time.Minute), MaxSectorProveCommitsSubmittedPerEpoch: 20, + UseSyntheticPoRep: false, }, Proving: ProvingConfig{ @@ -254,6 +258,8 @@ func DefaultStorageMiner() *StorageMiner { MaxWindowPoStGasFee: types.MustParseFIL("5"), MaxPublishDealsFee: types.MustParseFIL("0.05"), MaxMarketBalanceAddFee: types.MustParseFIL("0.007"), + + MaximizeWindowPoStFeeCap: true, }, Addresses: MinerAddressConfig{ diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index ecc95ddea..ce9d7f5b9 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -908,6 +908,12 @@ over the worker address if this flag is set.`, Name: "MaxMarketBalanceAddFee", Type: "types.FIL", + Comment: ``, + }, + { + Name: "MaximizeWindowPoStFeeCap", + Type: "bool", + Comment: ``, }, }, @@ -1068,13 +1074,10 @@ After changing this option, confirm that the new value works in your 
setup by in Name: "MaxPartitionsPerPoStMessage", Type: "int", - Comment: `Maximum number of partitions to prove in a single SubmitWindowPoSt messace. 0 = network limit (10 in nv16) + Comment: `Maximum number of partitions to prove in a single SubmitWindowPoSt messace. 0 = network limit (3 in nv21) A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors. - -The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which -means that a single message can prove at most 10 partitions - +// Note that setting this value lower may result in less efficient gas use - more messages will be sent, to prove each deadline, resulting in more total gas use (but each message will have lower gas limit) @@ -1365,7 +1368,7 @@ required to have expiration of at least the soonest-ending deal`, Comment: `CommittedCapacitySectorLifetime is the duration a Committed Capacity (CC) sector will live before it must be extended or converted into sector containing deals before it is -terminated. Value must be between 180-540 days inclusive`, +terminated. Value must be between 180-1278 days (1278 in nv21, 540 before nv21).`, }, { Name: "WaitDealsDelay", @@ -1510,6 +1513,12 @@ Submitting a smaller number of prove commits per epoch would reduce the possibil Comment: ``, }, + { + Name: "UseSyntheticPoRep", + Type: "bool", + + Comment: `UseSyntheticPoRep, when set to true, will reduce the amount of cache data held on disk after the completion of PreCommit 2 to 11GiB.`, + }, }, "Splitstore": { { @@ -1517,7 +1526,7 @@ Submitting a smaller number of prove commits per epoch would reduce the possibil Type: "string", Comment: `ColdStoreType specifies the type of the coldstore. 
-It can be "messages" (default) to store only messages, "universal" to store all chain state or "discard" for discarding cold blocks.`, +It can be "discard" (default) for discarding cold blocks, "messages" to store only messages or "universal" to store all chain state..`, }, { Name: "HotStoreType", diff --git a/node/config/types.go b/node/config/types.go index d85e1921d..d59bb5d71 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -328,13 +328,10 @@ type ProvingConfig struct { // 'lotus-miner proving compute window-post 0' DisableWDPoStPreChecks bool - // Maximum number of partitions to prove in a single SubmitWindowPoSt messace. 0 = network limit (10 in nv16) + // Maximum number of partitions to prove in a single SubmitWindowPoSt messace. 0 = network limit (3 in nv21) // // A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors. - // - // The maximum number of sectors which can be proven in a single PoSt message is 25000 in network version 16, which - // means that a single message can prove at most 10 partitions - // + // // // Note that setting this value lower may result in less efficient gas use - more messages will be sent, // to prove each deadline, resulting in more total gas use (but each message will have lower gas limit) // @@ -396,7 +393,7 @@ type SealingConfig struct { // CommittedCapacitySectorLifetime is the duration a Committed Capacity (CC) sector will // live before it must be extended or converted into sector containing deals before it is - // terminated. Value must be between 180-540 days inclusive + // terminated. Value must be between 180-1278 days (1278 in nv21, 540 before nv21). CommittedCapacitySectorLifetime Duration // Period of time that a newly created sector will wait for more deals to be packed in to before it starts to seal. 
@@ -467,6 +464,9 @@ type SealingConfig struct { // todo TargetSealingSectors uint64 // todo TargetSectors - stop auto-pleding new sectors after this many sectors are sealed, default CC upgrade for deals sectors if above + + // UseSyntheticPoRep, when set to true, will reduce the amount of cache data held on disk after the completion of PreCommit 2 to 11GiB. + UseSyntheticPoRep bool } type SealerConfig struct { @@ -528,6 +528,8 @@ type MinerFeeConfig struct { MaxWindowPoStGasFee types.FIL MaxPublishDealsFee types.FIL MaxMarketBalanceAddFee types.FIL + + MaximizeWindowPoStFeeCap bool } type LotusProviderFees struct { @@ -657,7 +659,7 @@ type Chainstore struct { type Splitstore struct { // ColdStoreType specifies the type of the coldstore. - // It can be "messages" (default) to store only messages, "universal" to store all chain state or "discard" for discarding cold blocks. + // It can be "discard" (default) for discarding cold blocks, "messages" to store only messages or "universal" to store all chain state.. ColdStoreType string // HotStoreType specifies the type of the hotstore. // Only currently supported value is "badger". 
diff --git a/node/hello/cbor_gen.go b/node/hello/cbor_gen.go index 5b0697f55..78e950f6f 100644 --- a/node/hello/cbor_gen.go +++ b/node/hello/cbor_gen.go @@ -43,9 +43,11 @@ func (t *HelloMessage) MarshalCBOR(w io.Writer) error { return err } for _, v := range t.HeaviestTipSet { - if err := cbg.WriteCid(w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.HeaviestTipSet: %w", err) + + if err := cbg.WriteCid(cw, v); err != nil { + return xerrors.Errorf("failed to write cid field v: %w", err) } + } // t.HeaviestTipSetHeight (abi.ChainEpoch) (int64) @@ -116,12 +118,25 @@ func (t *HelloMessage) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("reading cid field t.HeaviestTipSet failed: %w", err) + { + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.HeaviestTipSet[i]: %w", err) + } + + t.HeaviestTipSet[i] = c + + } } - t.HeaviestTipSet[i] = c } // t.HeaviestTipSetHeight (abi.ChainEpoch) (int64) diff --git a/node/impl/client/client.go b/node/impl/client/client.go index fff46acc7..c7bb252a1 100644 --- a/node/impl/client/client.go +++ b/node/impl/client/client.go @@ -197,7 +197,7 @@ func (a *API) dealStarter(ctx context.Context, params *api.StartDealParams, isSt return nil, xerrors.Errorf("failed to get network version: %w", err) } - st, err := miner.PreferredSealProofTypeFromWindowPoStType(networkVersion, mi.WindowPoStProofType) + st, err := miner.PreferredSealProofTypeFromWindowPoStType(networkVersion, mi.WindowPoStProofType, false) if err != nil { return nil, xerrors.Errorf("failed to get seal proof type: %w", err) } diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index a051b49b1..6b8b0e0aa 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -397,26 +397,31 @@ func (a *EthModule) 
EthGetTransactionReceiptLimited(ctx context.Context, txHash } msgLookup, err := a.StateAPI.StateSearchMsg(ctx, types.EmptyTSK, c, limit, true) - if err != nil || msgLookup == nil { + if err != nil { + return nil, xerrors.Errorf("failed to lookup Eth Txn %s as %s: %w", txHash, c, err) + } + if msgLookup == nil { + // This is the best we can do. In theory, we could have just not indexed this + // transaction, but there's no way to check that here. return nil, nil } tx, err := newEthTxFromMessageLookup(ctx, msgLookup, -1, a.Chain, a.StateAPI) if err != nil { - return nil, nil + return nil, xerrors.Errorf("failed to convert %s into an Eth Txn: %w", txHash, err) } var events []types.Event if rct := msgLookup.Receipt; rct.EventsRoot != nil { events, err = a.ChainAPI.ChainGetEvents(ctx, *rct.EventsRoot) if err != nil { - return nil, nil + return nil, xerrors.Errorf("failed get events for %s", txHash) } } receipt, err := newEthTxReceipt(ctx, tx, msgLookup, events, a.Chain, a.StateAPI) if err != nil { - return nil, nil + return nil, xerrors.Errorf("failed to convert %s into an Eth Receipt: %w", txHash, err) } return &receipt, nil @@ -728,10 +733,11 @@ func (a *EthModule) EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (eth } rewards, totalGasUsed := calculateRewardsAndGasUsed(rewardPercentiles, txGasRewards) + maxGas := build.BlockGasLimit * int64(len(ts.Blocks())) // arrays should be reversed at the end baseFeeArray = append(baseFeeArray, ethtypes.EthBigInt(basefee)) - gasUsedRatioArray = append(gasUsedRatioArray, float64(totalGasUsed)/float64(build.BlockGasLimit)) + gasUsedRatioArray = append(gasUsedRatioArray, float64(totalGasUsed)/float64(maxGas)) rewardsArray = append(rewardsArray, rewards) oldestBlkHeight = uint64(ts.Height()) blocksIncluded++ @@ -942,10 +948,7 @@ func (a *EthModule) EthTraceReplayBlockTransactions(ctx context.Context, blkNum return nil, xerrors.Errorf("failed to decode payload: %w", err) } } else { - output, err = 
handleFilecoinMethodOutput(ir.ExecutionTrace.MsgRct.ExitCode, ir.ExecutionTrace.MsgRct.ReturnCodec, ir.ExecutionTrace.MsgRct.Return) - if err != nil { - return nil, xerrors.Errorf("could not convert output: %w", err) - } + output = encodeFilecoinReturnAsABI(ir.ExecutionTrace.MsgRct.ExitCode, ir.ExecutionTrace.MsgRct.ReturnCodec, ir.ExecutionTrace.MsgRct.Return) } t := ethtypes.EthTraceReplayBlockTransaction{ diff --git a/node/impl/full/eth_test.go b/node/impl/full/eth_test.go index 903c2c1d8..c364a4873 100644 --- a/node/impl/full/eth_test.go +++ b/node/impl/full/eth_test.go @@ -1,6 +1,7 @@ package full import ( + "encoding/hex" "testing" "github.com/ipfs/go-cid" @@ -162,3 +163,17 @@ func TestRewardPercentiles(t *testing.T) { require.Equal(t, ans, rewards) } } + +func TestABIEncoding(t *testing.T) { + // Generated from https://abi.hashex.org/ + const expected = "000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000510000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000001b1111111111111111111020200301000000044444444444444444010000000000" + const data = "111111111111111111102020030100000004444444444444444401" + + expectedBytes, err := hex.DecodeString(expected) + require.NoError(t, err) + + dataBytes, err := hex.DecodeString(data) + require.NoError(t, err) + + require.Equal(t, expectedBytes, encodeAsABIHelper(22, 81, dataBytes)) +} diff --git a/node/impl/full/eth_trace.go b/node/impl/full/eth_trace.go index 3766c5448..fd5c25566 100644 --- a/node/impl/full/eth_trace.go +++ b/node/impl/full/eth_trace.go @@ -3,18 +3,13 @@ package full import ( "bytes" "context" - "encoding/binary" - "fmt" - "io" "github.com/multiformats/go-multicodec" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" - "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/builtin" 
"github.com/filecoin-project/go-state-types/builtin/v10/evm" - "github.com/filecoin-project/go-state-types/exitcode" builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" @@ -124,14 +119,8 @@ func buildTraces(ctx context.Context, traces *[]*ethtypes.EthTrace, parent *etht } else { // we are going to assume a native method, but we may change it in one of the edge cases below // TODO: only do this if we know it's a native method (optimization) - trace.Action.Input, err = handleFilecoinMethodInput(et.Msg.Method, et.Msg.ParamsCodec, et.Msg.Params) - if err != nil { - return xerrors.Errorf("buildTraces: %w", err) - } - trace.Result.Output, err = handleFilecoinMethodOutput(et.MsgRct.ExitCode, et.MsgRct.ReturnCodec, et.MsgRct.Return) - if err != nil { - return xerrors.Errorf("buildTraces: %w", err) - } + trace.Action.Input = encodeFilecoinParamsAsABI(et.Msg.Method, et.Msg.ParamsCodec, et.Msg.Params) + trace.Result.Output = encodeFilecoinReturnAsABI(et.MsgRct.ExitCode, et.MsgRct.ReturnCodec, et.MsgRct.Return) } // TODO: is it OK to check this here or is this only specific to certain edge case (evm to evm)? 
@@ -258,96 +247,3 @@ func buildTraces(ctx context.Context, traces *[]*ethtypes.EthTrace, parent *etht return nil } - -func writePadded(w io.Writer, data any, size int) error { - tmp := &bytes.Buffer{} - - // first write data to tmp buffer to get the size - err := binary.Write(tmp, binary.BigEndian, data) - if err != nil { - return fmt.Errorf("writePadded: failed writing tmp data to buffer: %w", err) - } - - if tmp.Len() > size { - return fmt.Errorf("writePadded: data is larger than size") - } - - // write tailing zeros to pad up to size - cnt := size - tmp.Len() - for i := 0; i < cnt; i++ { - err = binary.Write(w, binary.BigEndian, uint8(0)) - if err != nil { - return fmt.Errorf("writePadded: failed writing tailing zeros to buffer: %w", err) - } - } - - // finally write the actual value - err = binary.Write(w, binary.BigEndian, tmp.Bytes()) - if err != nil { - return fmt.Errorf("writePadded: failed writing data to buffer: %w", err) - } - - return nil -} - -func handleFilecoinMethodInput(method abi.MethodNum, codec uint64, params []byte) ([]byte, error) { - NATIVE_METHOD_SELECTOR := []byte{0x86, 0x8e, 0x10, 0xc4} - EVM_WORD_SIZE := 32 - - staticArgs := []uint64{ - uint64(method), - codec, - uint64(EVM_WORD_SIZE) * 3, - uint64(len(params)), - } - totalWords := len(staticArgs) + (len(params) / EVM_WORD_SIZE) - if len(params)%EVM_WORD_SIZE != 0 { - totalWords++ - } - len := 4 + totalWords*EVM_WORD_SIZE - - w := &bytes.Buffer{} - err := binary.Write(w, binary.BigEndian, NATIVE_METHOD_SELECTOR) - if err != nil { - return nil, fmt.Errorf("handleFilecoinMethodInput: failed writing method selector: %w", err) - } - - for _, arg := range staticArgs { - err := writePadded(w, arg, 32) - if err != nil { - return nil, fmt.Errorf("handleFilecoinMethodInput: %w", err) - } - } - err = binary.Write(w, binary.BigEndian, params) - if err != nil { - return nil, fmt.Errorf("handleFilecoinMethodInput: failed writing params: %w", err) - } - remain := len - w.Len() - for i := 0; i < remain; 
i++ { - err = binary.Write(w, binary.BigEndian, uint8(0)) - if err != nil { - return nil, fmt.Errorf("handleFilecoinMethodInput: failed writing tailing zeros: %w", err) - } - } - - return w.Bytes(), nil -} - -func handleFilecoinMethodOutput(exitCode exitcode.ExitCode, codec uint64, data []byte) ([]byte, error) { - w := &bytes.Buffer{} - - values := []interface{}{uint32(exitCode), codec, uint32(w.Len()), uint32(len(data))} - for _, v := range values { - err := writePadded(w, v, 32) - if err != nil { - return nil, fmt.Errorf("handleFilecoinMethodOutput: %w", err) - } - } - - err := binary.Write(w, binary.BigEndian, data) - if err != nil { - return nil, fmt.Errorf("handleFilecoinMethodOutput: failed writing data: %w", err) - } - - return w.Bytes(), nil -} diff --git a/node/impl/full/eth_utils.go b/node/impl/full/eth_utils.go index 5908c9412..2799638dd 100644 --- a/node/impl/full/eth_utils.go +++ b/node/impl/full/eth_utils.go @@ -3,6 +3,7 @@ package full import ( "bytes" "context" + "encoding/binary" "errors" "fmt" @@ -15,6 +16,7 @@ import ( builtintypes "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/builtin/v10/eam" "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" @@ -627,7 +629,13 @@ func newEthTxReceipt(ctx context.Context, tx ethtypes.EthTx, lookup *api.MsgLook return api.EthTxReceipt{}, xerrors.Errorf("failed to lookup tipset %s when constructing the eth txn receipt: %w", lookup.TipSet, err) } - baseFee := ts.Blocks()[0].ParentBaseFee + // The tx is located in the parent tipset + parentTs, err := cs.LoadTipSet(ctx, ts.Parents()) + if err != nil { + return api.EthTxReceipt{}, xerrors.Errorf("failed to lookup tipset %s when constructing the eth txn receipt: %w", ts.Parents(), err) + } + + baseFee := parentTs.Blocks()[0].ParentBaseFee gasOutputs := 
vm.ComputeGasOutputs(lookup.Receipt.GasUsed, int64(tx.Gas), baseFee, big.Int(tx.MaxFeePerGas), big.Int(tx.MaxPriorityFeePerGas), true) totalSpent := big.Sum(gasOutputs.BaseFeeBurn, gasOutputs.MinerTip, gasOutputs.OverEstimationBurn) @@ -665,6 +673,7 @@ func newEthTxReceipt(ctx context.Context, tx ethtypes.EthTx, lookup *api.MsgLook continue } for _, topic := range topics { + log.Debug("LogsBloom set for ", topic) ethtypes.EthBloomSet(receipt.LogsBloom, topic[:]) } l.Data = data @@ -687,3 +696,45 @@ func newEthTxReceipt(ctx context.Context, tx ethtypes.EthTx, lookup *api.MsgLook return receipt, nil } + +func encodeFilecoinParamsAsABI(method abi.MethodNum, codec uint64, params []byte) []byte { + buf := []byte{0x86, 0x8e, 0x10, 0xc4} // Native method selector. + return append(buf, encodeAsABIHelper(uint64(method), codec, params)...) +} + +func encodeFilecoinReturnAsABI(exitCode exitcode.ExitCode, codec uint64, data []byte) []byte { + return encodeAsABIHelper(uint64(exitCode), codec, data) +} + +// Format 2 numbers followed by an arbitrary byte array as solidity ABI. Both our native +// inputs/outputs follow the same pattern, so we can reuse this code. +func encodeAsABIHelper(param1 uint64, param2 uint64, data []byte) []byte { + const EVM_WORD_SIZE = 32 + + // The first two params are "static" numbers. Then, we record the offset of the "data" arg, + // then, at that offset, we record the length of the data. + // + // In practice, this means we have 4 256-bit words back to back where the third arg (the + // offset) is _always_ '32*3'. + staticArgs := []uint64{param1, param2, EVM_WORD_SIZE * 3, uint64(len(data))} + // We always pad out to the next EVM "word" (32 bytes). + totalWords := len(staticArgs) + (len(data) / EVM_WORD_SIZE) + if len(data)%EVM_WORD_SIZE != 0 { + totalWords++ + } + len := totalWords * EVM_WORD_SIZE + buf := make([]byte, len) + offset := 0 + // Below, we use copy instead of "appending" to preserve all the zero padding. 
+ for _, arg := range staticArgs { + // Write each "arg" into the last 8 bytes of each 32 byte word. + offset += EVM_WORD_SIZE + start := offset - 8 + binary.BigEndian.PutUint64(buf[start:offset], arg) + } + + // Finally, we copy in the data. + copy(buf[offset:], data) + + return buf +} diff --git a/node/impl/full/state.go b/node/impl/full/state.go index 514951675..0e92c8e5b 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "math" "strconv" "github.com/ipfs/go-cid" @@ -19,8 +20,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/big" - minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" - verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + market12 "github.com/filecoin-project/go-state-types/builtin/v12/market" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" @@ -794,7 +794,7 @@ func (a *StateAPI) StateGetAllocationForPendingDeal(ctx context.Context, dealId if err != nil { return nil, err } - if allocationId == verifregtypes.NoAllocationID { + if allocationId == verifreg.NoAllocationID { return nil, nil } @@ -914,32 +914,31 @@ func (a *StateAPI) StateComputeDataCID(ctx context.Context, maddr address.Addres return cid.Cid{}, err } - var ccparams []byte if nv < network.Version13 { - ccparams, err = actors.SerializeParams(&market2.ComputeDataCommitmentParams{ - DealIDs: deals, - SectorType: sectorType, - }) + return a.stateComputeDataCIDv1(ctx, maddr, sectorType, deals, tsk) + } else if nv < network.Version21 { + return a.stateComputeDataCIDv2(ctx, maddr, sectorType, deals, tsk) } else { - ccparams, err = actors.SerializeParams(&market5.ComputeDataCommitmentParams{ - Inputs: []*market5.SectorDataSpec{ - { - DealIDs: deals, - 
SectorType: sectorType, - }, - }, - }) + return a.stateComputeDataCIDv3(ctx, maddr, sectorType, deals, tsk) } +} + +func (a *StateAPI) stateComputeDataCIDv1(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) { + var err error + ccparams, err := actors.SerializeParams(&market2.ComputeDataCommitmentParams{ + DealIDs: deals, + SectorType: sectorType, + }) if err != nil { return cid.Undef, xerrors.Errorf("computing params for ComputeDataCommitment: %w", err) } - ccmt := &types.Message{ - To: market.Address, - From: maddr, - Value: types.NewInt(0), - Method: market.Methods.ComputeDataCommitment, + To: market.Address, + From: maddr, + Value: types.NewInt(0), + // Hard coded, because the method has since been deprecated + Method: 8, Params: ccparams, } r, err := a.StateCall(ctx, ccmt, tsk) @@ -950,13 +949,42 @@ func (a *StateAPI) StateComputeDataCID(ctx context.Context, maddr address.Addres return cid.Undef, xerrors.Errorf("receipt for ComputeDataCommitment had exit code %d", r.MsgRct.ExitCode) } - if nv < network.Version13 { - var c cbg.CborCid - if err := c.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil { - return cid.Undef, xerrors.Errorf("failed to unmarshal CBOR to CborCid: %w", err) - } + var c cbg.CborCid + if err := c.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil { + return cid.Undef, xerrors.Errorf("failed to unmarshal CBOR to CborCid: %w", err) + } - return cid.Cid(c), nil + return cid.Cid(c), nil +} + +func (a *StateAPI) stateComputeDataCIDv2(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) { + var err error + ccparams, err := actors.SerializeParams(&market5.ComputeDataCommitmentParams{ + Inputs: []*market5.SectorDataSpec{ + { + DealIDs: deals, + SectorType: sectorType, + }, + }, + }) + + if err != nil { + return cid.Undef, xerrors.Errorf("computing params for 
ComputeDataCommitment: %w", err) + } + ccmt := &types.Message{ + To: market.Address, + From: maddr, + Value: types.NewInt(0), + // Hard coded, because the method has since been deprecated + Method: 8, + Params: ccparams, + } + r, err := a.StateCall(ctx, ccmt, tsk) + if err != nil { + return cid.Undef, xerrors.Errorf("calling ComputeDataCommitment: %w", err) + } + if r.MsgRct.ExitCode != 0 { + return cid.Undef, xerrors.Errorf("receipt for ComputeDataCommitment had exit code %d", r.MsgRct.ExitCode) } var cr market5.ComputeDataCommitmentReturn @@ -971,6 +999,52 @@ func (a *StateAPI) StateComputeDataCID(ctx context.Context, maddr address.Addres return cid.Cid(cr.CommDs[0]), nil } +func (a *StateAPI) stateComputeDataCIDv3(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) { + if len(deals) == 0 { + return cid.Undef, nil + } + + var err error + ccparams, err := actors.SerializeParams(&market12.VerifyDealsForActivationParams{ + Sectors: []market12.SectorDeals{{ + SectorType: sectorType, + SectorExpiry: math.MaxInt64, + DealIDs: deals, + }}, + }) + + if err != nil { + return cid.Undef, xerrors.Errorf("computing params for VerifyDealsForActivation: %w", err) + } + ccmt := &types.Message{ + To: market.Address, + From: maddr, + Value: types.NewInt(0), + Method: market.Methods.VerifyDealsForActivation, + Params: ccparams, + } + r, err := a.StateCall(ctx, ccmt, tsk) + if err != nil { + return cid.Undef, xerrors.Errorf("calling VerifyDealsForActivation: %w", err) + } + if r.MsgRct.ExitCode != 0 { + return cid.Undef, xerrors.Errorf("receipt for VerifyDealsForActivation had exit code %d", r.MsgRct.ExitCode) + } + + var cr market12.VerifyDealsForActivationReturn + if err := cr.UnmarshalCBOR(bytes.NewReader(r.MsgRct.Return)); err != nil { + return cid.Undef, xerrors.Errorf("failed to unmarshal CBOR to VerifyDealsForActivationReturn: %w", err) + } + if len(cr.UnsealedCIDs) != 1 { + return 
cid.Undef, xerrors.Errorf("Sectors output must have 1 entry") + } + ucid := cr.UnsealedCIDs[0] + if ucid == nil { + return cid.Undef, xerrors.Errorf("computed data CID is nil") + } + return *ucid, nil +} + func (a *StateAPI) StateChangedActors(ctx context.Context, old cid.Cid, new cid.Cid) (map[string]types.Actor, error) { store := a.Chain.ActorStore(ctx) @@ -1040,7 +1114,7 @@ func (a *StateAPI) StateMinerAllocated(ctx context.Context, addr address.Address return mas.GetAllocatedSectors() } -func (a *StateAPI) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*minertypes.SectorPreCommitOnChainInfo, error) { +func (a *StateAPI) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error) { ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) @@ -1315,7 +1389,7 @@ func (m *StateModule) MsigGetPending(ctx context.Context, addr address.Address, var initialPledgeNum = types.NewInt(110) var initialPledgeDen = types.NewInt(100) -func (a *StateAPI) StateMinerPreCommitDepositForPower(ctx context.Context, maddr address.Address, pci minertypes.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) { +func (a *StateAPI) StateMinerPreCommitDepositForPower(ctx context.Context, maddr address.Address, pci miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) { ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) @@ -1347,7 +1421,7 @@ func (a *StateAPI) StateMinerPreCommitDepositForPower(ctx context.Context, maddr sectorWeight = builtin.QAPowerForWeight(ssize, duration, w, vw) } } else { - sectorWeight = minertypes.QAPowerMax(ssize) + sectorWeight = miner.QAPowerMax(ssize) } var powerSmoothed builtin.FilterEstimate @@ -1379,7 +1453,7 @@ func (a *StateAPI) 
StateMinerPreCommitDepositForPower(ctx context.Context, maddr return types.BigDiv(types.BigMul(deposit, initialPledgeNum), initialPledgeDen), nil } -func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr address.Address, pci minertypes.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) { +func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr address.Address, pci miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) { // TODO: this repeats a lot of the previous function. Fix that. ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { diff --git a/node/impl/net/rcmgr.go b/node/impl/net/rcmgr.go index 872554755..c606aabc6 100644 --- a/node/impl/net/rcmgr.go +++ b/node/impl/net/rcmgr.go @@ -36,7 +36,7 @@ func (a *NetAPI) NetStat(ctx context.Context, scope string) (result api.NetStat, if len(stat.Peers) > 0 { result.Peers = make(map[string]network.ScopeStat, len(stat.Peers)) for p, stat := range stat.Peers { - result.Peers[p.Pretty()] = stat + result.Peers[p.String()] = stat } } diff --git a/node/impl/storminer.go b/node/impl/storminer.go index 0ea746e91..a087e084f 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -35,7 +35,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" builtintypes "github.com/filecoin-project/go-state-types/builtin" - minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" @@ -43,6 +42,7 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin" + lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/harmony/harmonydb" @@ -500,7 
+500,7 @@ func (sm *StorageMinerAPI) SectorReceive(ctx context.Context, meta api.RemoteSec return err } -func (sm *StorageMinerAPI) ComputeWindowPoSt(ctx context.Context, dlIdx uint64, tsk types.TipSetKey) ([]minertypes.SubmitWindowedPoStParams, error) { +func (sm *StorageMinerAPI) ComputeWindowPoSt(ctx context.Context, dlIdx uint64, tsk types.TipSetKey) ([]lminer.SubmitWindowedPoStParams, error) { var ts *types.TipSet var err error if tsk == types.EmptyTSK { @@ -1407,7 +1407,7 @@ func (sm *StorageMinerAPI) withdrawBalance(ctx context.Context, amount abi.Token amount = available } - params, err := actors.SerializeParams(&minertypes.WithdrawBalanceParams{ + params, err := actors.SerializeParams(&lminer.WithdrawBalanceParams{ AmountRequested: amount, }) if err != nil { diff --git a/node/modules/alerts.go b/node/modules/alerts.go index 724761e3c..9976c6d0e 100644 --- a/node/modules/alerts.go +++ b/node/modules/alerts.go @@ -1,8 +1,10 @@ package modules import ( + "net" "os" "strconv" + "syscall" "github.com/filecoin-project/lotus/journal/alerting" "github.com/filecoin-project/lotus/lib/ulimit" @@ -35,6 +37,69 @@ func CheckFdLimit(min uint64) func(al *alerting.Alerting) { } } +func CheckUDPBufferSize(wanted int) func(al *alerting.Alerting) { + return func(al *alerting.Alerting) { + conn, err := net.Dial("udp", "localhost:0") + if err != nil { + alert := al.AddAlertType("process", "udp-buffer-size") + al.Raise(alert, map[string]string{ + "message": "Failed to create UDP connection", + "error": err.Error(), + }) + return + } + defer func() { + if err := conn.Close(); err != nil { + log.Warnf("Failed to close connection: %s", err) + } + }() + + udpConn, ok := conn.(*net.UDPConn) + if !ok { + alert := al.AddAlertType("process", "udp-buffer-size") + al.Raise(alert, map[string]string{ + "message": "Failed to cast connection to UDPConn", + }) + return + } + + file, err := udpConn.File() + if err != nil { + alert := al.AddAlertType("process", "udp-buffer-size") + 
al.Raise(alert, map[string]string{ + "message": "Failed to get file descriptor from UDPConn", + "error": err.Error(), + }) + return + } + defer func() { + if err := file.Close(); err != nil { + log.Warnf("Failed to close file: %s", err) + } + }() + + size, err := syscall.GetsockoptInt(int(file.Fd()), syscall.SOL_SOCKET, syscall.SO_RCVBUF) + if err != nil { + alert := al.AddAlertType("process", "udp-buffer-size") + al.Raise(alert, map[string]string{ + "message": "Failed to get UDP buffer size", + "error": err.Error(), + }) + return + } + + if size < wanted { + alert := al.AddAlertType("process", "udp-buffer-size") + al.Raise(alert, map[string]interface{}{ + "message": "UDP buffer size is low", + "current_size": size, + "wanted_size": wanted, + "help": "See https://github.com/quic-go/quic-go/wiki/UDP-Buffer-Sizes for details.", + }) + } + } +} + func LegacyMarketsEOL(al *alerting.Alerting) { // Add alert if lotus-miner legacy markets subsystem is still in use alert := al.AddAlertType("system", "EOL") diff --git a/node/modules/lp2p/host.go b/node/modules/lp2p/host.go index cc7a878fc..9c140b41e 100644 --- a/node/modules/lp2p/host.go +++ b/node/modules/lp2p/host.go @@ -41,7 +41,7 @@ func Peerstore() (peerstore.Peerstore, error) { func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (RawHost, error) { pkey := params.Peerstore.PrivKey(params.ID) if pkey == nil { - return nil, fmt.Errorf("missing private key for node ID: %s", params.ID.Pretty()) + return nil, fmt.Errorf("missing private key for node ID: %s", params.ID) } opts := []libp2p.Option{ diff --git a/node/modules/services.go b/node/modules/services.go index bb1d41917..f3dd443d9 100644 --- a/node/modules/services.go +++ b/node/modules/services.go @@ -11,8 +11,8 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - 
"github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/p2p/host/eventbus" "go.uber.org/fx" "golang.org/x/xerrors" @@ -66,18 +66,22 @@ func RunHello(mctx helpers.MetricsCtx, lc fx.Lifecycle, h host.Host, svc *hello. ctx := helpers.LifecycleCtx(mctx, lc) go func() { + // We want to get information on connected peers, we don't want to trigger new connections. + ctx := network.WithNoDial(ctx, "filecoin hello") for evt := range sub.Out() { pic := evt.(event.EvtPeerIdentificationCompleted) + // We just finished identifying the peer, that means we should know what + // protocols it speaks. Check if it speeks the Filecoin hello protocol + // before continuing. + if p, _ := h.Peerstore().FirstSupportedProtocol(pic.Peer, hello.ProtocolID); p != hello.ProtocolID { + continue + } + go func() { if err := svc.SayHello(ctx, pic.Peer); err != nil { protos, _ := h.Peerstore().GetProtocols(pic.Peer) agent, _ := h.Peerstore().Get(pic.Peer, "AgentVersion") - if protosContains(protos, hello.ProtocolID) { - log.Warnw("failed to say hello", "error", err, "peer", pic.Peer, "supported", protos, "agent", agent) - } else { - log.Debugw("failed to say hello", "error", err, "peer", pic.Peer, "supported", protos, "agent", agent) - } - return + log.Warnw("failed to say hello", "error", err, "peer", pic.Peer, "supported", protos, "agent", agent) } }() } @@ -85,15 +89,6 @@ func RunHello(mctx helpers.MetricsCtx, lc fx.Lifecycle, h host.Host, svc *hello. 
return nil } -func protosContains(protos []protocol.ID, search protocol.ID) bool { - for _, p := range protos { - if p == search { - return true - } - } - return false -} - func RunPeerMgr(mctx helpers.MetricsCtx, lc fx.Lifecycle, pmgr *peermgr.PeerMgr) { go pmgr.Run(helpers.LifecycleCtx(mctx, lc)) } diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 3e081d7e4..0680029bf 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -157,7 +157,8 @@ func SealProofType(maddr dtypes.MinerAddress, fnapi v1api.FullNode) (abi.Registe return 0, err } - return miner.PreferredSealProofTypeFromWindowPoStType(networkVersion, mi.WindowPoStProofType) + // node seal proof type does not decide whether or not we use synthetic porep + return miner.PreferredSealProofTypeFromWindowPoStType(networkVersion, mi.WindowPoStProofType, false) } func AddressSelector(addrConf *config.MinerAddressConfig) func() (*ctladdr.AddressSelector, error) { @@ -1015,6 +1016,7 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error TerminateBatchMin: cfg.TerminateBatchMin, TerminateBatchWait: config.Duration(cfg.TerminateBatchWait), MaxSectorProveCommitsSubmittedPerEpoch: cfg.MaxSectorProveCommitsSubmittedPerEpoch, + UseSyntheticPoRep: cfg.UseSyntheticPoRep, } c.SetSealingConfig(newCfg) }) @@ -1059,6 +1061,7 @@ func ToSealingConfig(dealmakingCfg config.DealmakingConfig, sealingCfg config.Se TerminateBatchMax: sealingCfg.TerminateBatchMax, TerminateBatchMin: sealingCfg.TerminateBatchMin, TerminateBatchWait: time.Duration(sealingCfg.TerminateBatchWait), + UseSyntheticPoRep: sealingCfg.UseSyntheticPoRep, } } diff --git a/paychmgr/cbor_gen.go b/paychmgr/cbor_gen.go index b3880aa10..f97c176a3 100644 --- a/paychmgr/cbor_gen.go +++ b/paychmgr/cbor_gen.go @@ -41,7 +41,7 @@ func (t *VoucherInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Proof"))); err != nil { return err } - if _, err 
:= io.WriteString(w, string("Proof")); err != nil { + if _, err := cw.WriteString(string("Proof")); err != nil { return err } @@ -65,7 +65,7 @@ func (t *VoucherInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Voucher"))); err != nil { return err } - if _, err := io.WriteString(w, string("Voucher")); err != nil { + if _, err := cw.WriteString(string("Voucher")); err != nil { return err } @@ -81,7 +81,7 @@ func (t *VoucherInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Submitted"))); err != nil { return err } - if _, err := io.WriteString(w, string("Submitted")); err != nil { + if _, err := cw.WriteString(string("Submitted")); err != nil { return err } @@ -218,7 +218,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Amount"))); err != nil { return err } - if _, err := io.WriteString(w, string("Amount")); err != nil { + if _, err := cw.WriteString(string("Amount")); err != nil { return err } @@ -234,7 +234,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Target"))); err != nil { return err } - if _, err := io.WriteString(w, string("Target")); err != nil { + if _, err := cw.WriteString(string("Target")); err != nil { return err } @@ -250,7 +250,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Channel"))); err != nil { return err } - if _, err := io.WriteString(w, string("Channel")); err != nil { + if _, err := cw.WriteString(string("Channel")); err != nil { return err } @@ -266,7 +266,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Control"))); err != nil { return err } - if _, err := io.WriteString(w, string("Control")); err != nil { + if _, err := 
cw.WriteString(string("Control")); err != nil { return err } @@ -282,7 +282,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("NextLane"))); err != nil { return err } - if _, err := io.WriteString(w, string("NextLane")); err != nil { + if _, err := cw.WriteString(string("NextLane")); err != nil { return err } @@ -298,7 +298,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Settling"))); err != nil { return err } - if _, err := io.WriteString(w, string("Settling")); err != nil { + if _, err := cw.WriteString(string("Settling")); err != nil { return err } @@ -314,7 +314,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Vouchers"))); err != nil { return err } - if _, err := io.WriteString(w, string("Vouchers")); err != nil { + if _, err := cw.WriteString(string("Vouchers")); err != nil { return err } @@ -339,7 +339,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ChannelID"))); err != nil { return err } - if _, err := io.WriteString(w, string("ChannelID")); err != nil { + if _, err := cw.WriteString(string("ChannelID")); err != nil { return err } @@ -350,7 +350,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ChannelID))); err != nil { return err } - if _, err := io.WriteString(w, string(t.ChannelID)); err != nil { + if _, err := cw.WriteString(string(t.ChannelID)); err != nil { return err } @@ -362,7 +362,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CreateMsg"))); err != nil { return err } - if _, err := io.WriteString(w, string("CreateMsg")); err != nil { + if _, err := cw.WriteString(string("CreateMsg")); err != nil 
{ return err } @@ -384,7 +384,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Direction"))); err != nil { return err } - if _, err := io.WriteString(w, string("Direction")); err != nil { + if _, err := cw.WriteString(string("Direction")); err != nil { return err } @@ -400,7 +400,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AddFundsMsg"))); err != nil { return err } - if _, err := io.WriteString(w, string("AddFundsMsg")); err != nil { + if _, err := cw.WriteString(string("AddFundsMsg")); err != nil { return err } @@ -422,7 +422,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PendingAmount"))); err != nil { return err } - if _, err := io.WriteString(w, string("PendingAmount")); err != nil { + if _, err := cw.WriteString(string("PendingAmount")); err != nil { return err } @@ -438,7 +438,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("AvailableAmount"))); err != nil { return err } - if _, err := io.WriteString(w, string("AvailableAmount")); err != nil { + if _, err := cw.WriteString(string("AvailableAmount")); err != nil { return err } @@ -454,7 +454,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PendingAvailableAmount"))); err != nil { return err } - if _, err := io.WriteString(w, string("PendingAvailableAmount")); err != nil { + if _, err := cw.WriteString(string("PendingAvailableAmount")); err != nil { return err } @@ -606,13 +606,32 @@ func (t *ChannelInfo) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v VoucherInfo - if err := v.UnmarshalCBOR(cr); err != nil { - 
return err + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.Vouchers[i] = new(VoucherInfo) + if err := t.Vouchers[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Vouchers[i] pointer: %w", err) + } + } + + } } - - t.Vouchers[i] = &v } // t.ChannelID (string) (string) @@ -746,7 +765,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Err"))); err != nil { return err } - if _, err := io.WriteString(w, string("Err")); err != nil { + if _, err := cw.WriteString(string("Err")); err != nil { return err } @@ -757,7 +776,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Err))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Err)); err != nil { + if _, err := cw.WriteString(string(t.Err)); err != nil { return err } @@ -769,7 +788,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("MsgCid"))); err != nil { return err } - if _, err := io.WriteString(w, string("MsgCid")); err != nil { + if _, err := cw.WriteString(string("MsgCid")); err != nil { return err } @@ -785,7 +804,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Received"))); err != nil { return err } - if _, err := io.WriteString(w, string("Received")); err != nil { + if _, err := cw.WriteString(string("Received")); err != nil { return err } @@ -801,7 +820,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ChannelID"))); err != nil { return err } - if _, err := io.WriteString(w, string("ChannelID")); err != nil { + if _, err := cw.WriteString(string("ChannelID")); err != nil { return err } @@ -812,7 +831,7 @@ func (t 
*MsgInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.ChannelID))); err != nil { return err } - if _, err := io.WriteString(w, string(t.ChannelID)); err != nil { + if _, err := cw.WriteString(string(t.ChannelID)); err != nil { return err } return nil diff --git a/paychmgr/paych.go b/paychmgr/paych.go index 1eb496dba..2729513ce 100644 --- a/paychmgr/paych.go +++ b/paychmgr/paych.go @@ -482,7 +482,7 @@ func (ca *channelAccessor) listVouchers(ctx context.Context, ch address.Address) // the data store over the chain state func (ca *channelAccessor) laneState(ctx context.Context, state lpaych.State, ch address.Address) (map[uint64]lpaych.LaneState, error) { // TODO: we probably want to call UpdateChannelState with all vouchers to be fully correct - // (but technically dont't need to) + // (but technically don't need to) laneCount, err := state.LaneCount() if err != nil { diff --git a/provider/lpwindow/compute_task.go b/provider/lpwindow/compute_task.go index 949c09365..83bf67a31 100644 --- a/provider/lpwindow/compute_task.go +++ b/provider/lpwindow/compute_task.go @@ -14,12 +14,12 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/harmony/harmonydb" "github.com/filecoin-project/lotus/lib/harmony/harmonytask" @@ -36,7 +36,7 @@ import ( var log = logging.Logger("lpwindow") -var EpochsPerDeadline = miner.WPoStProvingPeriod / abi.ChainEpoch(miner.WPoStPeriodDeadlines) +var EpochsPerDeadline = miner.WPoStProvingPeriod() / 
abi.ChainEpoch(miner.WPoStPeriodDeadlines) type WdPostTaskDetails struct { Ts *types.TipSet diff --git a/storage/paths/local.go b/storage/paths/local.go index 7d1be644a..577d4dbe0 100644 --- a/storage/paths/local.go +++ b/storage/paths/local.go @@ -548,7 +548,7 @@ func (st *Local) AcquireSector(ctx context.Context, sid storiface.SectorRef, exi } if best == "" { - return storiface.SectorPaths{}, storiface.SectorPaths{}, xerrors.Errorf("couldn't find a suitable path for a sector") + return storiface.SectorPaths{}, storiface.SectorPaths{}, storiface.Err(storiface.ErrTempAllocateSpace, xerrors.Errorf("couldn't find a suitable path for a sector")) } storiface.SetPathByType(&out, fileType, best) diff --git a/storage/pipeline/cbor_gen.go b/storage/pipeline/cbor_gen.go index 57a668ae6..c832f8a14 100644 --- a/storage/pipeline/cbor_gen.go +++ b/storage/pipeline/cbor_gen.go @@ -43,7 +43,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Log"))); err != nil { return err } - if _, err := io.WriteString(w, string("Log")); err != nil { + if _, err := cw.WriteString(string("Log")); err != nil { return err } @@ -68,7 +68,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CommD"))); err != nil { return err } - if _, err := io.WriteString(w, string("CommD")); err != nil { + if _, err := cw.WriteString(string("CommD")); err != nil { return err } @@ -90,7 +90,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CommR"))); err != nil { return err } - if _, err := io.WriteString(w, string("CommR")); err != nil { + if _, err := cw.WriteString(string("CommR")); err != nil { return err } @@ -112,7 +112,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Proof"))); err != nil { return err } - if _, err := 
io.WriteString(w, string("Proof")); err != nil { + if _, err := cw.WriteString(string("Proof")); err != nil { return err } @@ -136,7 +136,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("State"))); err != nil { return err } - if _, err := io.WriteString(w, string("State")); err != nil { + if _, err := cw.WriteString(string("State")); err != nil { return err } @@ -147,7 +147,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.State))); err != nil { return err } - if _, err := io.WriteString(w, string(t.State)); err != nil { + if _, err := cw.WriteString(string(t.State)); err != nil { return err } @@ -159,7 +159,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Pieces"))); err != nil { return err } - if _, err := io.WriteString(w, string("Pieces")); err != nil { + if _, err := cw.WriteString(string("Pieces")); err != nil { return err } @@ -184,7 +184,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Return"))); err != nil { return err } - if _, err := io.WriteString(w, string("Return")); err != nil { + if _, err := cw.WriteString(string("Return")); err != nil { return err } @@ -195,7 +195,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Return))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Return)); err != nil { + if _, err := cw.WriteString(string(t.Return)); err != nil { return err } @@ -207,7 +207,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("LastErr"))); err != nil { return err } - if _, err := io.WriteString(w, string("LastErr")); err != nil { + if _, err := cw.WriteString(string("LastErr")); err 
!= nil { return err } @@ -218,7 +218,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.LastErr))); err != nil { return err } - if _, err := io.WriteString(w, string(t.LastErr)); err != nil { + if _, err := cw.WriteString(string(t.LastErr)); err != nil { return err } @@ -230,7 +230,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CCPieces"))); err != nil { return err } - if _, err := io.WriteString(w, string("CCPieces")); err != nil { + if _, err := cw.WriteString(string("CCPieces")); err != nil { return err } @@ -255,7 +255,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CCUpdate"))); err != nil { return err } - if _, err := io.WriteString(w, string("CCUpdate")); err != nil { + if _, err := cw.WriteString(string("CCUpdate")); err != nil { return err } @@ -271,7 +271,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SeedEpoch"))); err != nil { return err } - if _, err := io.WriteString(w, string("SeedEpoch")); err != nil { + if _, err := cw.WriteString(string("SeedEpoch")); err != nil { return err } @@ -293,7 +293,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SeedValue"))); err != nil { return err } - if _, err := io.WriteString(w, string("SeedValue")); err != nil { + if _, err := cw.WriteString(string("SeedValue")); err != nil { return err } @@ -317,7 +317,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorType"))); err != nil { return err } - if _, err := io.WriteString(w, string("SectorType")); err != nil { + if _, err := cw.WriteString(string("SectorType")); err != nil { return err } @@ -339,7 +339,7 @@ func (t 
*SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TicketEpoch"))); err != nil { return err } - if _, err := io.WriteString(w, string("TicketEpoch")); err != nil { + if _, err := cw.WriteString(string("TicketEpoch")); err != nil { return err } @@ -361,7 +361,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TicketValue"))); err != nil { return err } - if _, err := io.WriteString(w, string("TicketValue")); err != nil { + if _, err := cw.WriteString(string("TicketValue")); err != nil { return err } @@ -385,7 +385,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CreationTime"))); err != nil { return err } - if _, err := io.WriteString(w, string("CreationTime")); err != nil { + if _, err := cw.WriteString(string("CreationTime")); err != nil { return err } @@ -407,7 +407,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorNumber"))); err != nil { return err } - if _, err := io.WriteString(w, string("SectorNumber")); err != nil { + if _, err := cw.WriteString(string("SectorNumber")); err != nil { return err } @@ -423,7 +423,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TerminatedAt"))); err != nil { return err } - if _, err := io.WriteString(w, string("TerminatedAt")); err != nil { + if _, err := cw.WriteString(string("TerminatedAt")); err != nil { return err } @@ -445,7 +445,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UpdateSealed"))); err != nil { return err } - if _, err := io.WriteString(w, string("UpdateSealed")); err != nil { + if _, err := cw.WriteString(string("UpdateSealed")); err != nil { return err } @@ -467,7 +467,7 @@ 
func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("CommitMessage"))); err != nil { return err } - if _, err := io.WriteString(w, string("CommitMessage")); err != nil { + if _, err := cw.WriteString(string("CommitMessage")); err != nil { return err } @@ -489,7 +489,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("InvalidProofs"))); err != nil { return err } - if _, err := io.WriteString(w, string("InvalidProofs")); err != nil { + if _, err := cw.WriteString(string("InvalidProofs")); err != nil { return err } @@ -505,7 +505,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PreCommit1Out"))); err != nil { return err } - if _, err := io.WriteString(w, string("PreCommit1Out")); err != nil { + if _, err := cw.WriteString(string("PreCommit1Out")); err != nil { return err } @@ -529,7 +529,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("FaultReportMsg"))); err != nil { return err } - if _, err := io.WriteString(w, string("FaultReportMsg")); err != nil { + if _, err := cw.WriteString(string("FaultReportMsg")); err != nil { return err } @@ -551,7 +551,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("UpdateUnsealed"))); err != nil { return err } - if _, err := io.WriteString(w, string("UpdateUnsealed")); err != nil { + if _, err := cw.WriteString(string("UpdateUnsealed")); err != nil { return err } @@ -573,7 +573,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PreCommit1Fails"))); err != nil { return err } - if _, err := io.WriteString(w, string("PreCommit1Fails")); err != nil { + if _, err := cw.WriteString(string("PreCommit1Fails")); err 
!= nil { return err } @@ -589,7 +589,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PreCommit2Fails"))); err != nil { return err } - if _, err := io.WriteString(w, string("PreCommit2Fails")); err != nil { + if _, err := cw.WriteString(string("PreCommit2Fails")); err != nil { return err } @@ -605,7 +605,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PreCommitTipSet"))); err != nil { return err } - if _, err := io.WriteString(w, string("PreCommitTipSet")); err != nil { + if _, err := cw.WriteString(string("PreCommitTipSet")); err != nil { return err } @@ -621,7 +621,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("RemoteDataCache"))); err != nil { return err } - if _, err := io.WriteString(w, string("RemoteDataCache")); err != nil { + if _, err := cw.WriteString(string("RemoteDataCache")); err != nil { return err } @@ -637,7 +637,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PreCommitDeposit"))); err != nil { return err } - if _, err := io.WriteString(w, string("PreCommitDeposit")); err != nil { + if _, err := cw.WriteString(string("PreCommitDeposit")); err != nil { return err } @@ -653,7 +653,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PreCommitMessage"))); err != nil { return err } - if _, err := io.WriteString(w, string("PreCommitMessage")); err != nil { + if _, err := cw.WriteString(string("PreCommitMessage")); err != nil { return err } @@ -675,7 +675,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("RemoteDataSealed"))); err != nil { return err } - if _, err := io.WriteString(w, string("RemoteDataSealed")); 
err != nil { + if _, err := cw.WriteString(string("RemoteDataSealed")); err != nil { return err } @@ -691,7 +691,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("TerminateMessage"))); err != nil { return err } - if _, err := io.WriteString(w, string("TerminateMessage")); err != nil { + if _, err := cw.WriteString(string("TerminateMessage")); err != nil { return err } @@ -713,7 +713,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("RemoteDataUnsealed"))); err != nil { return err } - if _, err := io.WriteString(w, string("RemoteDataUnsealed")); err != nil { + if _, err := cw.WriteString(string("RemoteDataUnsealed")); err != nil { return err } @@ -729,7 +729,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ReplicaUpdateProof"))); err != nil { return err } - if _, err := io.WriteString(w, string("ReplicaUpdateProof")); err != nil { + if _, err := cw.WriteString(string("ReplicaUpdateProof")); err != nil { return err } @@ -753,7 +753,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("RemoteDataFinalized"))); err != nil { return err } - if _, err := io.WriteString(w, string("RemoteDataFinalized")); err != nil { + if _, err := cw.WriteString(string("RemoteDataFinalized")); err != nil { return err } @@ -769,7 +769,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ReplicaUpdateMessage"))); err != nil { return err } - if _, err := io.WriteString(w, string("ReplicaUpdateMessage")); err != nil { + if _, err := cw.WriteString(string("ReplicaUpdateMessage")); err != nil { return err } @@ -791,7 +791,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, 
uint64(len("RemoteCommit1Endpoint"))); err != nil { return err } - if _, err := io.WriteString(w, string("RemoteCommit1Endpoint")); err != nil { + if _, err := cw.WriteString(string("RemoteCommit1Endpoint")); err != nil { return err } @@ -802,7 +802,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.RemoteCommit1Endpoint))); err != nil { return err } - if _, err := io.WriteString(w, string(t.RemoteCommit1Endpoint)); err != nil { + if _, err := cw.WriteString(string(t.RemoteCommit1Endpoint)); err != nil { return err } @@ -814,7 +814,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("RemoteCommit2Endpoint"))); err != nil { return err } - if _, err := io.WriteString(w, string("RemoteCommit2Endpoint")); err != nil { + if _, err := cw.WriteString(string("RemoteCommit2Endpoint")); err != nil { return err } @@ -825,7 +825,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.RemoteCommit2Endpoint))); err != nil { return err } - if _, err := io.WriteString(w, string(t.RemoteCommit2Endpoint)); err != nil { + if _, err := cw.WriteString(string(t.RemoteCommit2Endpoint)); err != nil { return err } @@ -837,7 +837,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("RemoteSealingDoneEndpoint"))); err != nil { return err } - if _, err := io.WriteString(w, string("RemoteSealingDoneEndpoint")); err != nil { + if _, err := cw.WriteString(string("RemoteSealingDoneEndpoint")); err != nil { return err } @@ -848,7 +848,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.RemoteSealingDoneEndpoint))); err != nil { return err } - if _, err := io.WriteString(w, string(t.RemoteSealingDoneEndpoint)); err != nil { + if _, err := 
cw.WriteString(string(t.RemoteSealingDoneEndpoint)); err != nil { return err } return nil @@ -913,13 +913,22 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v Log - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + if err := t.Log[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Log[i]: %w", err) + } + + } } - - t.Log[i] = v } // t.CommD (cid.Cid) (struct) @@ -1022,13 +1031,22 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v api.SectorPiece - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + if err := t.Pieces[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Pieces[i]: %w", err) + } + + } } - - t.Pieces[i] = v } // t.Return (sealing.ReturnState) (string) @@ -1074,13 +1092,22 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v api.SectorPiece - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + if err := t.CCPieces[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.CCPieces[i]: %w", err) + } + + } } - - t.CCPieces[i] = v } // t.CCUpdate (bool) (bool) @@ -1700,7 +1727,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Kind"))); err != nil { return err } - if _, err := io.WriteString(w, string("Kind")); err != nil { + if _, err := cw.WriteString(string("Kind")); err != nil { return err } @@ -1711,7 +1738,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Kind))); err != nil { return err } - if 
_, err := io.WriteString(w, string(t.Kind)); err != nil { + if _, err := cw.WriteString(string(t.Kind)); err != nil { return err } @@ -1723,7 +1750,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Trace"))); err != nil { return err } - if _, err := io.WriteString(w, string("Trace")); err != nil { + if _, err := cw.WriteString(string("Trace")); err != nil { return err } @@ -1734,7 +1761,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Trace))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Trace)); err != nil { + if _, err := cw.WriteString(string(t.Trace)); err != nil { return err } @@ -1746,7 +1773,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Message"))); err != nil { return err } - if _, err := io.WriteString(w, string("Message")); err != nil { + if _, err := cw.WriteString(string("Message")); err != nil { return err } @@ -1757,7 +1784,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Message))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Message)); err != nil { + if _, err := cw.WriteString(string(t.Message)); err != nil { return err } @@ -1769,7 +1796,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Timestamp"))); err != nil { return err } - if _, err := io.WriteString(w, string("Timestamp")); err != nil { + if _, err := cw.WriteString(string("Timestamp")); err != nil { return err } diff --git a/storage/pipeline/checks.go b/storage/pipeline/checks.go index b243804cc..ecd160231 100644 --- a/storage/pipeline/checks.go +++ b/storage/pipeline/checks.go @@ -106,13 +106,15 @@ func checkPrecommit(ctx context.Context, maddr address.Address, si SectorInfo, t return err } - commD, err 
:= api.StateComputeDataCID(ctx, maddr, si.SectorType, si.dealIDs(), tsk) - if err != nil { - return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)} - } + if si.hasDeals() { + commD, err := api.StateComputeDataCID(ctx, maddr, si.SectorType, si.dealIDs(), tsk) + if err != nil { + return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)} + } - if si.CommD == nil || !commD.Equals(*si.CommD) { - return &ErrBadCommD{xerrors.Errorf("on chain CommD differs from sector: %s != %s", commD, si.CommD)} + if si.CommD == nil || !commD.Equals(*si.CommD) { + return &ErrBadCommD{xerrors.Errorf("on chain CommD differs from sector: %s != %s", commD, si.CommD)} + } } pci, err := api.StateSectorPreCommitInfo(ctx, maddr, si.SectorNumber, tsk) diff --git a/storage/pipeline/commit_batch.go b/storage/pipeline/commit_batch.go index 9948b5432..754f31763 100644 --- a/storage/pipeline/commit_batch.go +++ b/storage/pipeline/commit_batch.go @@ -16,12 +16,12 @@ import ( actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/go-state-types/proof" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/config" diff --git a/storage/pipeline/precommit_batch.go b/storage/pipeline/precommit_batch.go index 7e25d500b..3a86c8628 100644 --- a/storage/pipeline/precommit_batch.go +++ b/storage/pipeline/precommit_batch.go @@ -13,12 +13,12 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" 
"github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/config" diff --git a/storage/pipeline/precommit_policy.go b/storage/pipeline/precommit_policy.go index e1b6e6be7..6e234f930 100644 --- a/storage/pipeline/precommit_policy.go +++ b/storage/pipeline/precommit_policy.go @@ -85,10 +85,15 @@ func (p *BasicPreCommitPolicy) Expiration(ctx context.Context, ps ...api.SectorP } if end == nil { - // no deal pieces, get expiration for committed capacity sector - expirationDuration, err := p.getCCSectorLifetime() + nv, err := p.api.StateNetworkVersion(ctx, types.EmptyTSK) if err != nil { - return 0, err + return 0, xerrors.Errorf("failed to get network version: %w", err) + } + + // no deal pieces, get expiration for committed capacity sector + expirationDuration, err := p.getCCSectorLifetime(nv) + if err != nil { + return 0, xerrors.Errorf("failed to get cc sector lifetime: %w", err) } tmp := ts.Height() + expirationDuration @@ -105,25 +110,30 @@ func (p *BasicPreCommitPolicy) Expiration(ctx context.Context, ps ...api.SectorP return *end, nil } -func (p *BasicPreCommitPolicy) getCCSectorLifetime() (abi.ChainEpoch, error) { +func (p *BasicPreCommitPolicy) getCCSectorLifetime(nv network.Version) (abi.ChainEpoch, error) { c, err := p.getSealingConfig() if err != nil { return 0, xerrors.Errorf("sealing config load error: %w", err) } + maxCommitment, err := policy.GetMaxSectorExpirationExtension(nv) + if err != nil { + return 0, xerrors.Errorf("failed to get max extension: %w", err) + } + var 
ccLifetimeEpochs = abi.ChainEpoch(uint64(c.CommittedCapacitySectorLifetime.Seconds()) / builtin.EpochDurationSeconds) // if zero value in config, assume default sector extension if ccLifetimeEpochs == 0 { - ccLifetimeEpochs = policy.GetMaxSectorExpirationExtension() + ccLifetimeEpochs = maxCommitment } if minExpiration := abi.ChainEpoch(miner.MinSectorExpiration); ccLifetimeEpochs < minExpiration { log.Warnf("value for CommittedCapacitySectorLiftime is too short, using default minimum (%d epochs)", minExpiration) return minExpiration, nil } - if maxExpiration := policy.GetMaxSectorExpirationExtension(); ccLifetimeEpochs > maxExpiration { - log.Warnf("value for CommittedCapacitySectorLiftime is too long, using default maximum (%d epochs)", maxExpiration) - return maxExpiration, nil + if ccLifetimeEpochs > maxCommitment { + log.Warnf("value for CommittedCapacitySectorLiftime is too long, using default maximum (%d epochs)", maxCommitment) + return maxCommitment, nil } return ccLifetimeEpochs - p.provingBuffer, nil diff --git a/storage/pipeline/precommit_policy_test.go b/storage/pipeline/precommit_policy_test.go index 9f23e58d6..7865560de 100644 --- a/storage/pipeline/precommit_policy_test.go +++ b/storage/pipeline/precommit_policy_test.go @@ -68,7 +68,9 @@ func TestBasicPolicyEmptySector(t *testing.T) { require.NoError(t, err) // as set when there are no deal pieces - expected := h + policy.GetMaxSectorExpirationExtension() - pBuffer + maxExtension, err := policy.GetMaxSectorExpirationExtension(build.TestNetworkVersion) + assert.NoError(t, err) + expected := h + maxExtension - pBuffer assert.Equal(t, int(expected), int(exp)) } @@ -132,7 +134,7 @@ func TestBasicPolicyMostConstrictiveSchedule(t *testing.T) { func TestBasicPolicyIgnoresExistingScheduleIfExpired(t *testing.T) { cfg := fakeConfigGetter(nil) - policy := pipeline.NewBasicPreCommitPolicy(&fakeChain{ + pcp := pipeline.NewBasicPreCommitPolicy(&fakeChain{ h: abi.ChainEpoch(55), }, cfg, 0) @@ -152,11 +154,14 @@ 
func TestBasicPolicyIgnoresExistingScheduleIfExpired(t *testing.T) { }, } - exp, err := policy.Expiration(context.Background(), pieces...) + exp, err := pcp.Expiration(context.Background(), pieces...) + require.NoError(t, err) + + maxLifetime, err := policy.GetMaxSectorExpirationExtension(build.TestNetworkVersion) require.NoError(t, err) // Treated as a CC sector, so expiration becomes currEpoch + maxLifetime = 55 + 1555200 - assert.Equal(t, 1555255, int(exp)) + assert.Equal(t, 55+maxLifetime, exp) } func TestMissingDealIsIgnored(t *testing.T) { diff --git a/storage/pipeline/sealiface/config.go b/storage/pipeline/sealiface/config.go index 99715fc28..e41b143ec 100644 --- a/storage/pipeline/sealiface/config.go +++ b/storage/pipeline/sealiface/config.go @@ -60,4 +60,6 @@ type Config struct { TerminateBatchMax uint64 TerminateBatchMin uint64 TerminateBatchWait time.Duration + + UseSyntheticPoRep bool } diff --git a/storage/pipeline/sealing.go b/storage/pipeline/sealing.go index d664de1e2..65d3fb14b 100644 --- a/storage/pipeline/sealing.go +++ b/storage/pipeline/sealing.go @@ -15,7 +15,6 @@ import ( "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" @@ -24,6 +23,7 @@ import ( "github.com/filecoin-project/go-storedcounter" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/types" @@ -339,7 +339,12 @@ func (m *Sealing) currentSealProof(ctx context.Context) (abi.RegisteredSealProof return 0, err } - return 
lminer.PreferredSealProofTypeFromWindowPoStType(ver, mi.WindowPoStProofType) + c, err := m.getConfig() + if err != nil { + return 0, err + } + + return lminer.PreferredSealProofTypeFromWindowPoStType(ver, mi.WindowPoStProofType, c.UseSyntheticPoRep) } func (m *Sealing) minerSector(spt abi.RegisteredSealProof, num abi.SectorNumber) storiface.SectorRef { diff --git a/storage/pipeline/states_failed.go b/storage/pipeline/states_failed.go index 203f14910..3323c4c9b 100644 --- a/storage/pipeline/states_failed.go +++ b/storage/pipeline/states_failed.go @@ -10,12 +10,12 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-commp-utils/zerocomm" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-statemachine" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) diff --git a/storage/pipeline/states_replica_update.go b/storage/pipeline/states_replica_update.go index e1b9cfc30..6717f49a6 100644 --- a/storage/pipeline/states_replica_update.go +++ b/storage/pipeline/states_replica_update.go @@ -10,12 +10,12 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-statemachine" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" ) diff 
--git a/storage/pipeline/states_sealing.go b/storage/pipeline/states_sealing.go index 48d024f85..5c91161ef 100644 --- a/storage/pipeline/states_sealing.go +++ b/storage/pipeline/states_sealing.go @@ -17,7 +17,6 @@ import ( actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/network" @@ -26,6 +25,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader" @@ -232,6 +232,7 @@ func retrySoftErr(ctx context.Context, cb func() error) error { fallthrough case storiface.ErrTempAllocateSpace: // retry + log.Errorw("retrying soft error", "err", err, "code", cerr.ErrCode()) default: // non-temp error return err @@ -399,7 +400,12 @@ func (m *Sealing) preCommitInfo(ctx statemachine.Context, sector SectorInfo) (*m } // Assume: both precommit msg & commit msg land on chain as early as possible - maxExpiration := ts.Height() + policy.GetPreCommitChallengeDelay() + policy.GetMaxSectorExpirationExtension() + maxExtension, err := policy.GetMaxSectorExpirationExtension(nv) + if err != nil { + return nil, big.Zero(), types.EmptyTSK, ctx.Send(SectorSealPreCommit1Failed{xerrors.Errorf("failed to get max extension: %w", err)}) + } + + maxExpiration := ts.Height() + policy.GetPreCommitChallengeDelay() + maxExtension if expiration > maxExpiration { expiration = maxExpiration } @@ -630,6 +636,7 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) porepProof, err = 
m.sealer.SealCommit2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), c2in) if err != nil { + log.Errorw("Commit2 error", "error", err) return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("computing seal proof failed(2): %w", err)}) } } else { diff --git a/storage/sealer/cbor_gen.go b/storage/sealer/cbor_gen.go index 4fa7fd980..22da1b520 100644 --- a/storage/sealer/cbor_gen.go +++ b/storage/sealer/cbor_gen.go @@ -40,7 +40,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ID"))); err != nil { return err } - if _, err := io.WriteString(w, string("ID")); err != nil { + if _, err := cw.WriteString(string("ID")); err != nil { return err } @@ -56,7 +56,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("State"))); err != nil { return err } - if _, err := io.WriteString(w, string("State")); err != nil { + if _, err := cw.WriteString(string("State")); err != nil { return err } @@ -72,7 +72,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Result"))); err != nil { return err } - if _, err := io.WriteString(w, string("Result")); err != nil { + if _, err := cw.WriteString(string("Result")); err != nil { return err } @@ -88,7 +88,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("RetType"))); err != nil { return err } - if _, err := io.WriteString(w, string("RetType")); err != nil { + if _, err := cw.WriteString(string("RetType")); err != nil { return err } @@ -99,7 +99,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.RetType))); err != nil { return err } - if _, err := io.WriteString(w, string(t.RetType)); err != nil { + if _, err := cw.WriteString(string(t.RetType)); err != nil { return err } return 
nil @@ -228,7 +228,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ID"))); err != nil { return err } - if _, err := io.WriteString(w, string("ID")); err != nil { + if _, err := cw.WriteString(string("ID")); err != nil { return err } @@ -244,7 +244,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Status"))); err != nil { return err } - if _, err := io.WriteString(w, string("Status")); err != nil { + if _, err := cw.WriteString(string("Status")); err != nil { return err } @@ -255,7 +255,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Status))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Status)); err != nil { + if _, err := cw.WriteString(string(t.Status)); err != nil { return err } @@ -267,7 +267,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartTime"))); err != nil { return err } - if _, err := io.WriteString(w, string("StartTime")); err != nil { + if _, err := cw.WriteString(string("StartTime")); err != nil { return err } @@ -289,7 +289,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WorkError"))); err != nil { return err } - if _, err := io.WriteString(w, string("WorkError")); err != nil { + if _, err := cw.WriteString(string("WorkError")); err != nil { return err } @@ -300,7 +300,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.WorkError))); err != nil { return err } - if _, err := io.WriteString(w, string(t.WorkError)); err != nil { + if _, err := cw.WriteString(string(t.WorkError)); err != nil { return err } @@ -312,7 +312,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WorkerCall"))); err != nil { return err } - if _, err := io.WriteString(w, string("WorkerCall")); err != nil { + if _, err := cw.WriteString(string("WorkerCall")); err != nil { return err } @@ -328,7 +328,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WorkerHostname"))); err != nil { return err } - if _, err := io.WriteString(w, string("WorkerHostname")); err != nil { + if _, err := cw.WriteString(string("WorkerHostname")); err != nil { return err } @@ -339,7 +339,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.WorkerHostname))); err != nil { return err } - if _, err := io.WriteString(w, string(t.WorkerHostname)); err != nil { + if _, err := cw.WriteString(string(t.WorkerHostname)); err != nil { return err } return nil @@ -491,7 +491,7 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Method"))); err != nil { return err } - if _, err := io.WriteString(w, string("Method")); err != nil { + if _, err := cw.WriteString(string("Method")); err != nil { return err } @@ -502,7 +502,7 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Method))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Method)); err != nil { + if _, err := cw.WriteString(string(t.Method)); err != nil { return err } @@ -514,7 +514,7 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Params"))); err != nil { return err } - if _, err := io.WriteString(w, string("Params")); err != nil { + if _, err := cw.WriteString(string("Params")); err != nil { return err } @@ -525,7 +525,7 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, 
uint64(len(t.Params))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Params)); err != nil { + if _, err := cw.WriteString(string(t.Params)); err != nil { return err } return nil diff --git a/storage/sealer/ffiwrapper/sealer_cgo.go b/storage/sealer/ffiwrapper/sealer_cgo.go index c8087875e..812a69fa7 100644 --- a/storage/sealer/ffiwrapper/sealer_cgo.go +++ b/storage/sealer/ffiwrapper/sealer_cgo.go @@ -900,15 +900,49 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storiface.SectorRef return storiface.SectorCids{}, xerrors.Errorf("unmarshaling pc1 output: %w", err) } - var ticket abi.SealRandomness ti, found := p1odec["_lotus_SealRandomness"] + if abi.Synthetic[sector.ProofType] { + if !found { + return storiface.SectorCids{}, xerrors.Errorf("synthetic mode: ticket not found") + } + } + if found { - ticket, err = base64.StdEncoding.DecodeString(ti.(string)) + ticket, err := base64.StdEncoding.DecodeString(ti.(string)) if err != nil { return storiface.SectorCids{}, xerrors.Errorf("decoding ticket: %w", err) } + if abi.Synthetic[sector.ProofType] { + // note: we generate synth porep challenges first because the C1 check below reads from those + + err = ffi.GenerateSynthProofs( + sector.ProofType, + sealedCID, + unsealedCID, + paths.Cache, + paths.Sealed, + sector.ID.Number, + sector.ID.Miner, ticket, + []abi.PieceInfo{{Size: abi.PaddedPieceSize(ssize), PieceCID: unsealedCID}}) + if err != nil { + log.Warn("GenerateSynthProofs() failed: ", err) + log.Warnf("num:%d tkt:%v, sealedCID:%v, unsealedCID:%v", sector.ID.Number, ticket, sealedCID, unsealedCID) + return storiface.SectorCids{}, xerrors.Errorf("generate synth proofs: %w", err) + } + + if err = ffi.ClearCache(uint64(ssize), paths.Cache); err != nil { + log.Warn("failed to GenerateSynthProofs(): ", err) + log.Warnf("num:%d tkt:%v, sealedCID:%v, unsealedCID:%v", sector.ID.Number, ticket, sealedCID, unsealedCID) + return storiface.SectorCids{ + Unsealed: unsealedCID, + Sealed: 
sealedCID, + }, nil + // Note: non-fatal error. + } + } + for i := 0; i < PC2CheckRounds; i++ { var sd [32]byte _, _ = rand.Read(sd[:]) @@ -964,6 +998,7 @@ func (sb *Sealer) SealCommit1(ctx context.Context, sector storiface.SectorRef, t return nil, xerrors.Errorf("StandaloneSealCommit: %w", err) } + return output, nil } @@ -1153,6 +1188,13 @@ func (sb *Sealer) FinalizeSector(ctx context.Context, sector storiface.SectorRef } defer done() + if abi.Synthetic[sector.ProofType] { + if err = ffi.ClearSyntheticProofs(uint64(ssize), paths.Cache); err != nil { + log.Warn("Unable to delete Synth cache:", err) + // Pass-Thru on error. + } + } + return ffi.ClearCache(uint64(ssize), paths.Cache) } @@ -1191,6 +1233,13 @@ func (sb *Sealer) FinalizeSectorInto(ctx context.Context, sector storiface.Secto } } + if abi.Synthetic[sector.ProofType] { + if err = ffi.ClearSyntheticProofs(uint64(ssize), dest); err != nil { + log.Warn("Unable to delete Synth cache:", err) + // Pass-Thru on error. + } + } + return ffi.ClearCache(uint64(ssize), dest) } @@ -1207,6 +1256,12 @@ func (sb *Sealer) FinalizeReplicaUpdate(ctx context.Context, sector storiface.Se } defer done() + if abi.Synthetic[sector.ProofType] { + if err = ffi.ClearSyntheticProofs(uint64(ssize), paths.Cache); err != nil { + return xerrors.Errorf("clear synth cache: %w", err) + } + } + if err := ffi.ClearCache(uint64(ssize), paths.Cache); err != nil { return xerrors.Errorf("clear cache: %w", err) } @@ -1219,6 +1274,8 @@ func (sb *Sealer) FinalizeReplicaUpdate(ctx context.Context, sector storiface.Se } defer done() + // note: synth cache is not a thing for snapdeals + if err := ffi.ClearCache(uint64(ssize), paths.UpdateCache); err != nil { return xerrors.Errorf("clear cache: %w", err) } diff --git a/storage/sealer/ffiwrapper/sealer_test.go b/storage/sealer/ffiwrapper/sealer_test.go index 73b2ad52f..da1b98429 100644 --- a/storage/sealer/ffiwrapper/sealer_test.go +++ b/storage/sealer/ffiwrapper/sealer_test.go @@ -1090,7 +1090,111 @@ 
func TestDCAPCloses(t *testing.T) { require.Equal(t, "baga6ea4seaqeje7jy4hufnybpo7ckxzujaigqbcxhdjq7ojb4b6xzgqdugkyciq", c.PieceCID.String()) require.True(t, clr.closed) }) +} +func TestSealAndVerifySynth(t *testing.T) { + sealProofType = abi.RegisteredSealProof_StackedDrg2KiBV1_1_Feat_SyntheticPoRep + + if testing.Short() { + t.Skip("skipping test in short mode") + } + + defer requireFDsClosed(t, openFDs(t)) + + if runtime.NumCPU() < 10 && os.Getenv("CI") == "" { // don't bother on slow hardware + t.Skip("this is slow") + } + _ = os.Setenv("RUST_LOG", "info") + + getGrothParamFileAndVerifyingKeys(sectorSize) + + cdir, err := os.MkdirTemp("", "sbtest-c-") + if err != nil { + t.Fatal(err) + } + miner := abi.ActorID(123) + + synthPorRepVProofsName := "syn-porep-vanilla-proofs.dat" + + printFileList := func(stage string, expectSynthPorep bool) { + var hasSynthPorep bool + + fmt.Println("----file list:", stage) + err := filepath.Walk(cdir, func(path string, info os.FileInfo, err error) error { + if strings.Contains(path, synthPorRepVProofsName) { + hasSynthPorep = true + } + fmt.Println(path) + return nil + }) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, expectSynthPorep, hasSynthPorep) + + fmt.Println("----") + } + + sp := &basicfs.Provider{ + Root: cdir, + } + sb, err := New(sp) + if err != nil { + t.Fatalf("%+v", err) + } + t.Cleanup(func() { + if t.Failed() { + fmt.Printf("not removing %s\n", cdir) + return + } + if err := os.RemoveAll(cdir); err != nil { + t.Error(err) + } + }) + + si := storiface.SectorRef{ + ID: abi.SectorID{Miner: miner, Number: 1}, + ProofType: sealProofType, + } + + s := seal{ref: si} + + start := time.Now() + + s.precommit(t, sb, si, func() {}) + + printFileList("precommit", true) + + precommit := time.Now() + + s.commit(t, sb, func() {}) + + printFileList("commit", true) + + commit := time.Now() + + post(t, sb, nil, s) + + printFileList("post", true) + + epost := time.Now() + + post(t, sb, nil, s) + + if err := 
sb.FinalizeSector(context.TODO(), si); err != nil { + t.Fatalf("%+v", err) + } + + printFileList("finalize", false) + + s.unseal(t, sb, sp, si, func() {}) + + printFileList("unseal", false) + + fmt.Printf("PreCommit: %s\n", precommit.Sub(start).String()) + fmt.Printf("Commit: %s\n", commit.Sub(precommit).String()) + fmt.Printf("EPoSt: %s\n", epost.Sub(commit).String()) } type closeAssertReader struct { diff --git a/storage/sealer/manager.go b/storage/sealer/manager.go index 700a5aec5..41b3a1b39 100644 --- a/storage/sealer/manager.go +++ b/storage/sealer/manager.go @@ -802,7 +802,7 @@ func (m *Manager) FinalizeReplicaUpdate(ctx context.Context, sector storiface.Se { unsealedStores, ferr := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUnsealed, 0, false) - if err != nil { + if ferr != nil { err = multierr.Append(err, xerrors.Errorf("find unsealed sector before move: %w", ferr)) } else if len(unsealedStores) > 0 { // if we found unsealed files, AND have been asked to keep at least one piece, move unsealed diff --git a/storage/sealer/proofpaths/cachefiles.go b/storage/sealer/proofpaths/cachefiles.go index 5e41f831d..24b29e9f5 100644 --- a/storage/sealer/proofpaths/cachefiles.go +++ b/storage/sealer/proofpaths/cachefiles.go @@ -14,15 +14,15 @@ func LayerFileName(layer int) string { func SDRLayers(spt abi.RegisteredSealProof) (int, error) { switch spt { - case abi.RegisteredSealProof_StackedDrg2KiBV1, abi.RegisteredSealProof_StackedDrg2KiBV1_1: + case abi.RegisteredSealProof_StackedDrg2KiBV1, abi.RegisteredSealProof_StackedDrg2KiBV1_1, abi.RegisteredSealProof_StackedDrg2KiBV1_1_Feat_SyntheticPoRep: return 2, nil - case abi.RegisteredSealProof_StackedDrg8MiBV1, abi.RegisteredSealProof_StackedDrg8MiBV1_1: + case abi.RegisteredSealProof_StackedDrg8MiBV1, abi.RegisteredSealProof_StackedDrg8MiBV1_1, abi.RegisteredSealProof_StackedDrg8MiBV1_1_Feat_SyntheticPoRep: return 2, nil - case abi.RegisteredSealProof_StackedDrg512MiBV1, 
abi.RegisteredSealProof_StackedDrg512MiBV1_1: + case abi.RegisteredSealProof_StackedDrg512MiBV1, abi.RegisteredSealProof_StackedDrg512MiBV1_1, abi.RegisteredSealProof_StackedDrg512MiBV1_1_Feat_SyntheticPoRep: return 2, nil - case abi.RegisteredSealProof_StackedDrg32GiBV1, abi.RegisteredSealProof_StackedDrg32GiBV1_1: + case abi.RegisteredSealProof_StackedDrg32GiBV1, abi.RegisteredSealProof_StackedDrg32GiBV1_1, abi.RegisteredSealProof_StackedDrg32GiBV1_1_Feat_SyntheticPoRep: return 11, nil - case abi.RegisteredSealProof_StackedDrg64GiBV1, abi.RegisteredSealProof_StackedDrg64GiBV1_1: + case abi.RegisteredSealProof_StackedDrg64GiBV1, abi.RegisteredSealProof_StackedDrg64GiBV1_1, abi.RegisteredSealProof_StackedDrg64GiBV1_1_Feat_SyntheticPoRep: return 11, nil default: return 0, fmt.Errorf("unsupported proof type: %v", spt) diff --git a/storage/sealer/storiface/cbor_gen.go b/storage/sealer/storiface/cbor_gen.go index 2f82da3e6..0b42136ea 100644 --- a/storage/sealer/storiface/cbor_gen.go +++ b/storage/sealer/storiface/cbor_gen.go @@ -38,7 +38,7 @@ func (t *CallID) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("ID"))); err != nil { return err } - if _, err := io.WriteString(w, string("ID")); err != nil { + if _, err := cw.WriteString(string("ID")); err != nil { return err } @@ -62,7 +62,7 @@ func (t *CallID) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Sector"))); err != nil { return err } - if _, err := io.WriteString(w, string("Sector")); err != nil { + if _, err := cw.WriteString(string("Sector")); err != nil { return err } @@ -173,7 +173,7 @@ func (t *SecDataHttpHeader) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Key"))); err != nil { return err } - if _, err := io.WriteString(w, string("Key")); err != nil { + if _, err := cw.WriteString(string("Key")); err != nil { return err } @@ -184,7 +184,7 @@ func (t 
*SecDataHttpHeader) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Key))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Key)); err != nil { + if _, err := cw.WriteString(string(t.Key)); err != nil { return err } @@ -196,7 +196,7 @@ func (t *SecDataHttpHeader) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Value"))); err != nil { return err } - if _, err := io.WriteString(w, string("Value")); err != nil { + if _, err := cw.WriteString(string("Value")); err != nil { return err } @@ -207,7 +207,7 @@ func (t *SecDataHttpHeader) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Value))); err != nil { return err } - if _, err := io.WriteString(w, string(t.Value)); err != nil { + if _, err := cw.WriteString(string(t.Value)); err != nil { return err } return nil @@ -302,7 +302,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("URL"))); err != nil { return err } - if _, err := io.WriteString(w, string("URL")); err != nil { + if _, err := cw.WriteString(string("URL")); err != nil { return err } @@ -313,7 +313,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.URL))); err != nil { return err } - if _, err := io.WriteString(w, string(t.URL)); err != nil { + if _, err := cw.WriteString(string(t.URL)); err != nil { return err } @@ -325,7 +325,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Local"))); err != nil { return err } - if _, err := io.WriteString(w, string("Local")); err != nil { + if _, err := cw.WriteString(string("Local")); err != nil { return err } @@ -341,7 +341,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Headers"))); err != nil { return err } - if _, err := io.WriteString(w, string("Headers")); err != nil { + if _, err := cw.WriteString(string("Headers")); err != nil { return err } @@ -448,13 +448,22 @@ func (t *SectorLocation) UnmarshalCBOR(r io.Reader) (err error) { } for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + _ = maj + _ = extra + _ = err - var v SecDataHttpHeader - if err := v.UnmarshalCBOR(cr); err != nil { - return err + { + + if err := t.Headers[i].UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.Headers[i]: %w", err) + } + + } } - - t.Headers[i] = v } default: diff --git a/storage/sealer/storiface/resources.go b/storage/sealer/storiface/resources.go index 0fd80d79a..6f8d83265 100644 --- a/storage/sealer/storiface/resources.go +++ b/storage/sealer/storiface/resources.go @@ -587,13 +587,18 @@ func init() { ResourceTable[sealtasks.TTDataCid][proof] = ResourceTable[sealtasks.TTAddPiece][abi.RegisteredSealProof_StackedDrg32GiBV1] } - // V1_1 is the same as V1 + // V1_1 and SyntheticPoRep are the same as V1 for _, m := range ResourceTable { m[abi.RegisteredSealProof_StackedDrg2KiBV1_1] = m[abi.RegisteredSealProof_StackedDrg2KiBV1] + m[abi.RegisteredSealProof_StackedDrg2KiBV1_1_Feat_SyntheticPoRep] = m[abi.RegisteredSealProof_StackedDrg2KiBV1] m[abi.RegisteredSealProof_StackedDrg8MiBV1_1] = m[abi.RegisteredSealProof_StackedDrg8MiBV1] + m[abi.RegisteredSealProof_StackedDrg8MiBV1_1_Feat_SyntheticPoRep] = m[abi.RegisteredSealProof_StackedDrg8MiBV1] m[abi.RegisteredSealProof_StackedDrg512MiBV1_1] = m[abi.RegisteredSealProof_StackedDrg512MiBV1] + m[abi.RegisteredSealProof_StackedDrg512MiBV1_1_Feat_SyntheticPoRep] = m[abi.RegisteredSealProof_StackedDrg512MiBV1] m[abi.RegisteredSealProof_StackedDrg32GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg32GiBV1] + m[abi.RegisteredSealProof_StackedDrg32GiBV1_1_Feat_SyntheticPoRep] = 
m[abi.RegisteredSealProof_StackedDrg32GiBV1] m[abi.RegisteredSealProof_StackedDrg64GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg64GiBV1] + m[abi.RegisteredSealProof_StackedDrg64GiBV1_1_Feat_SyntheticPoRep] = m[abi.RegisteredSealProof_StackedDrg64GiBV1] } } diff --git a/storage/wdpost/wdpost_changehandler.go b/storage/wdpost/wdpost_changehandler.go index 1d4432459..ce58f1489 100644 --- a/storage/wdpost/wdpost_changehandler.go +++ b/storage/wdpost/wdpost_changehandler.go @@ -6,9 +6,9 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" ) @@ -531,12 +531,12 @@ func NextDeadline(currentDeadline *dline.Info) *dline.Info { newDeadline := currentDeadline.Index + 1 if newDeadline == miner.WPoStPeriodDeadlines { newDeadline = 0 - periodStart = periodStart + miner.WPoStProvingPeriod + periodStart = periodStart + miner.WPoStProvingPeriod() } return NewDeadlineInfo(periodStart, newDeadline, currentDeadline.CurrentEpoch) } func NewDeadlineInfo(periodStart abi.ChainEpoch, deadlineIdx uint64, currEpoch abi.ChainEpoch) *dline.Info { - return dline.NewInfo(periodStart, deadlineIdx, currEpoch, miner.WPoStPeriodDeadlines, miner.WPoStProvingPeriod, miner.WPoStChallengeWindow, miner.WPoStChallengeLookback, miner.FaultDeclarationCutoff) + return dline.NewInfo(periodStart, deadlineIdx, currEpoch, miner.WPoStPeriodDeadlines, miner.WPoStProvingPeriod(), miner.WPoStChallengeWindow(), miner.WPoStChallengeLookback, miner.FaultDeclarationCutoff) } diff --git a/storage/wdpost/wdpost_journal.go b/storage/wdpost/wdpost_journal.go index 406628f68..68cad3fcf 100644 --- a/storage/wdpost/wdpost_journal.go +++ b/storage/wdpost/wdpost_journal.go @@ -4,8 +4,9 @@ import ( "github.com/ipfs/go-cid" 
"github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/dline" + + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" ) // SchedulerState defines the possible states in which the scheduler could be, diff --git a/storage/wdpost/wdpost_run.go b/storage/wdpost/wdpost_run.go index c7fff3b70..edc088ccf 100644 --- a/storage/wdpost/wdpost_run.go +++ b/storage/wdpost/wdpost_run.go @@ -15,7 +15,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/network" @@ -25,6 +24,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/types" @@ -651,7 +651,7 @@ func (s *WindowPoStScheduler) submitPoStMessage(ctx context.Context, proof *mine Params: enc, Value: types.NewInt(0), } - spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)} + spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee), MaximizeFeeCap: s.feeCfg.MaximizeWindowPoStFeeCap} if err := s.prepareMessage(ctx, msg, spec); err != nil { return nil, err } diff --git a/storage/wdpost/wdpost_run_faults.go b/storage/wdpost/wdpost_run_faults.go index f36b30d35..3a41cc4cc 100644 --- a/storage/wdpost/wdpost_run_faults.go +++ b/storage/wdpost/wdpost_run_faults.go @@ -14,12 +14,12 @@ import ( "github.com/filecoin-project/go-bitfield" 
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" ) @@ -168,11 +168,11 @@ func (s *WindowPoStScheduler) declareRecoveries(ctx context.Context, dlIdx uint6 Params: enc, Value: types.NewInt(0), } - spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)} + spec := &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee), MaximizeFeeCap: s.feeCfg.MaximizeWindowPoStFeeCap} if err := s.prepareMessage(ctx, msg, spec); err != nil { return nil, nil, err } - sm, err := s.api.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{MaxFee: abi.TokenAmount(s.feeCfg.MaxWindowPoStGasFee)}) + sm, err := s.api.MpoolPushMessage(ctx, msg, spec) if err != nil { return nil, nil, xerrors.Errorf("pushing message to mpool: %w", err) } diff --git a/storage/wdpost/wdpost_run_test.go b/storage/wdpost/wdpost_run_test.go index bfc9ef9c1..a3847a6f6 100644 --- a/storage/wdpost/wdpost_run_test.go +++ b/storage/wdpost/wdpost_run_test.go @@ -298,7 +298,7 @@ func TestWDPostDoPostPartLimitConfig(t *testing.T) { //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 ctx := context.Background() - expectedMsgCount := 364 + expectedMsgCount := 8 proofType := abi.RegisteredPoStProof_StackedDrgWindow2KiBV1 postAct := tutils.NewIDAddr(t, 100) @@ -318,15 +318,15 @@ func TestWDPostDoPostPartLimitConfig(t *testing.T) { partitionsPerMsg = minertypes.AddressedPartitionsMax } - partitionCount := 4 * partitionsPerMsg + 
partitionCount := 5 * partitionsPerMsg // Assert that user config is less than network limit - userPartLimit := 33 - lastMsgParts := 21 - require.Greater(t, partitionCount, userPartLimit) + userPartLimit := 2 + lastMsgParts := 1 + require.Greater(t, partitionsPerMsg, userPartLimit) // Assert that we consts are correct - require.Equal(t, (expectedMsgCount-1)*userPartLimit+lastMsgParts, 4*partitionsPerMsg) + require.Equal(t, (expectedMsgCount-1)*userPartLimit+lastMsgParts, partitionCount) var partitions []api.Partition for p := 0; p < partitionCount; p++ { @@ -398,7 +398,7 @@ func TestBatchPartitionsRecoverySectors(t *testing.T) { mockStgMinerAPI := newMockStorageMinerAPI() - userPartLimit := 4 + userPartLimit := 2 scheduler := &WindowPoStScheduler{ api: mockStgMinerAPI, @@ -426,12 +426,12 @@ func TestBatchPartitionsRecoverySectors(t *testing.T) { } partitions = append(partitions, generatePartition(100, 10)) - expectedBatchLens := []int{4, 1, 1, 4, 2, 1} + expectedBatchLens := []int{2, 2, 1, 1, 2, 2, 2, 1} - batches, err := scheduler.BatchPartitions(partitions, network.Version16) + batches, err := scheduler.BatchPartitions(partitions, network.Version21) require.NoError(t, err) - require.Equal(t, len(batches), 6) + require.Equal(t, len(batches), len(expectedBatchLens)) for i, batch := range batches { require.Equal(t, len(batch), expectedBatchLens[i]) diff --git a/storage/wdpost/wdpost_sched.go b/storage/wdpost/wdpost_sched.go index 4a4624f47..bbf4596fe 100644 --- a/storage/wdpost/wdpost_sched.go +++ b/storage/wdpost/wdpost_sched.go @@ -12,14 +12,13 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" 
"github.com/filecoin-project/lotus/build" - lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" @@ -47,7 +46,7 @@ type NodeAPI interface { StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]api.Partition, error) StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) - StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*lminer.SectorLocation, error) + StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) MpoolPushMessage(context.Context, *types.Message, *api.MessageSendSpec) (*types.SignedMessage, error)