Merge branch 'master' into asr/fix-eth-api-gateway

commit 2e56237898
Raúl Kripalani, 2023-03-12 15:56:55 +00:00
86 changed files with 1882 additions and 486 deletions

View File

@@ -9,15 +9,9 @@ body:
       options:
         - label: This is **not** a security-related bug/issue. If it is, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
           required: true
-        - label: This is **not** a question or a support request. If you have any lotus related questions, please ask in the [lotus forum](https://github.com/filecoin-project/lotus/discussions).
-          required: true
-        - label: This is **not** a new feature request. If it is, please file a [feature request](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Ffeature&template=feature_request.yml) instead.
-          required: true
-        - label: This is **not** an enhancement request. If it is, please file an [improvement suggestion](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Fenhancement&template=enhancement.yml) instead.
-          required: true
         - label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion.
           required: true
-        - label: I am running the [`Latest release`](https://github.com/filecoin-project/lotus/releases), or the most recent RC (release candidate) for the upcoming release or the dev branch (master), or have an issue updating to any of these.
+        - label: I am running the [`Latest release`](https://github.com/filecoin-project/lotus/releases), the most recent RC (release candidate) for the upcoming release or the dev branch (master), or have an issue updating to any of these.
           required: true
         - label: I did not make any code changes to lotus.
           required: false
@@ -28,19 +22,11 @@ body:
       options:
         - label: lotus daemon - chain sync
           required: false
-        - label: lotus miner - mining and block production
+        - label: lotus fvm/fevm - Lotus FVM and FEVM interactions
           required: false
         - label: lotus miner/worker - sealing
           required: false
-        - label: lotus miner - proving(WindowPoSt)
+        - label: lotus miner - proving(WindowPoSt/WinningPoSt)
           required: false
-        - label: lotus miner/market - storage deal
-          required: false
-        - label: lotus miner/market - retrieval deal
-          required: false
-        - label: lotus miner/market - data transfer
-          required: false
-        - label: lotus client
-          required: false
         - label: lotus JSON-RPC API
           required: false
@@ -56,22 +42,33 @@ body:
       description: Enter the output of `lotus version` and `lotus-miner version` if applicable.
       placeholder: |
         e.g.
-        Daemon:1.11.0-rc2+debug+git.0519cd371.dirty+api1.3.0
-        Local: lotus version 1.11.0-rc2+debug+git.0519cd371.dirty
+        Daemon: 1.19.0+mainnet+git.64059ca87+api1.5.0
+        Local: lotus-miner version 1.19.0+mainnet+git.64059ca87
     validations:
       required: true
+  - type: textarea
+    id: ReproSteps
+    attributes:
+      label: Repro Steps
+      description: "Steps to reproduce the behavior"
+      value: |
+        1. Run '...'
+        2. Do '...'
+        3. See error '...'
+        ...
+    validations:
+      required: false
   - type: textarea
     id: Description
     attributes:
       label: Describe the Bug
       description: |
         This is where you get to tell us what went wrong. When doing so, please try to provide a clear and concise description of the bug with all related information:
-        * What you were doding when you experienced the bug?
+        * What were you doing when you experienced the bug?
         * Any *error* messages you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas).
         * What is the expected behaviour?
         * For sealing issues, include the output of `lotus-miner sectors status --log <sectorId>` for the failed sector(s).
         * For proving issues, include the output of `lotus-miner proving info`.
-        * For deal making issues, include the output of `lotus client list-deals -v` and/or `lotus-miner storage-deals|retrieval-deals|data-transfers list [-v]` commands for the deal(s) in question.
     validations:
       required: true
   - type: textarea

@@ -83,18 +80,6 @@ body:
         Please provide debug logs of the problem. Remember you can set log level control for:
         * lotus: use `lotus log list` to get all log systems available and set level by `lotus log set-level`. An example can be found [here](https://lotus.filecoin.io/lotus/configure/defaults/#log-level-control).
         * lotus-miner: `lotus-miner log list` to get all log systems available and set level by `lotus-miner log set-level`
-        If you don't provide detailed logs when you raise the issue it will almost certainly be the first request I make before further diagnosing the problem.
+        If you don't provide detailed logs when you raise the issue it will almost certainly be the first request we make before further diagnosing the problem.
     validations:
       required: true
-  - type: textarea
-    id: RepoSteps
-    attributes:
-      label: Repo Steps
-      description: "Steps to reproduce the behavior"
-      value: |
-        1. Run '...'
-        2. Do '...'
-        3. See error '...'
-        ...
-    validations:
-      required: false

.github/ISSUE_TEMPLATE/config.yml (new file, 8 lines)
View File

@@ -0,0 +1,8 @@
+blank_issues_enabled: true
+contact_links:
+  - name: Ask a question about Lotus or get support
+    url: https://github.com/filecoin-project/lotus/discussions/new/choose
+    about: Ask a question or request support for using Lotus
+  - name: Filecoin protocol feature or enhancement
+    url: https://github.com/filecoin-project/FIPs/discussions/new/choose
+    about: Write a discussion in the Filecoin Improvement Proposal repo

View File

@@ -7,13 +7,7 @@ body:
       label: Checklist
       description: Please check off the following boxes before continuing to create an improvement suggestion!
       options:
-        - label: This is **not** a new feature or an enhancement to the Filecoin protocol. If it is, please open an [FIP issue](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0001.md).
-          required: true
-        - label: This is **not** a new feature request. If it is, please file a [feature request](https://github.com/filecoin-project/lotus/issues/new?assignees=&labels=need%2Ftriage%2Ckind%2Ffeature&template=feature_request.yml) instead.
-          required: true
-        - label: This is **not** brainstorming ideas. If you have an idea you'd like to discuss, please open a new discussion on [the lotus forum](https://github.com/filecoin-project/lotus/discussions/categories/ideas) and select the category as `Ideas`.
-          required: true
-        - label: I **have** a specific, actionable, and well motivated improvement to propose.
+        - label: I **have** a specific, actionable, and well motivated improvement to an existing lotus feature.
           required: true
   - type: checkboxes
     attributes:
@@ -22,19 +16,11 @@ body:
       options:
         - label: lotus daemon - chain sync
           required: false
-        - label: lotus miner - mining and block production
+        - label: lotus fvm/fevm - Lotus FVM and FEVM interactions
           required: false
         - label: lotus miner/worker - sealing
           required: false
-        - label: lotus miner - proving(WindowPoSt)
+        - label: lotus miner - proving(WindowPoSt/WinningPoSt)
           required: false
-        - label: lotus miner/market - storage deal
-          required: false
-        - label: lotus miner/market - retrieval deal
-          required: false
-        - label: lotus miner/market - data transfer
-          required: false
-        - label: lotus client
-          required: false
         - label: lotus JSON-RPC API
           required: false
@@ -45,9 +31,17 @@ body:
   - type: textarea
     id: request
     attributes:
-      label: Improvement Suggestion
-      description: A clear and concise description of what the motivation or the current problem is and what is the suggested improvement?
-      placeholder: Ex. Currently lotus... However, as a storage provider, I'd like...
+      label: Enhancement Suggestion
+      description: A clear and concise description of the suggested enhancement.
+      placeholder: Ex. Currently lotus... However, it would be great if [enhancement] was implemented... With the ability to...
+    validations:
+      required: true
+  - type: textarea
+    id: use-case
+    attributes:
+      label: Use-Case
+      description: How would this enhancement help you?
+      placeholder: Ex. With the [enhancement] node operators would be able to... For Storage Providers it would enable...
     validations:
       required: true

View File

@@ -7,8 +7,6 @@ body:
       label: Checklist
       description: Please check off the following boxes before continuing to create a new feature request!
       options:
-        - label: This is **not** a new feature or an enhancement to the Filecoin protocol. If it is, please open an [FIP issue](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0001.md).
-          required: true
         - label: This is **not** brainstorming ideas. If you have an idea you'd like to discuss, please open a new discussion on [the lotus forum](https://github.com/filecoin-project/lotus/discussions/categories/ideas) and select the category as `Ideas`.
           required: true
         - label: I **have** a specific, actionable, and well motivated feature request to propose.
@@ -20,19 +18,11 @@ body:
       options:
         - label: lotus daemon - chain sync
           required: false
-        - label: lotus miner - mining and block production
+        - label: lotus fvm/fevm - Lotus FVM and FEVM interactions
           required: false
         - label: lotus miner/worker - sealing
           required: false
-        - label: lotus miner - proving(WindowPoSt)
+        - label: lotus miner - proving(WindowPoSt/WinningPoSt)
           required: false
-        - label: lotus miner/market - storage deal
-          required: false
-        - label: lotus miner/market - retrieval deal
-          required: false
-        - label: lotus miner/market - data transfer
-          required: false
-        - label: lotus client
-          required: false
         - label: lotus JSON-RPC API
           required: false
@@ -56,7 +46,7 @@ body:
     validations:
       required: true
   - type: textarea
-    id: alternates
+    id: alternatives
     attributes:
       label: Describe alternatives you've considered
       description: A clear and concise description of any alternative solutions or features you've considered.
@@ -69,4 +59,3 @@ body:
       description: Add any other context, design docs or screenshots about the feature request here.
     validations:
       required: false

View File

@@ -0,0 +1,83 @@
+name: "Bug Report - developer/service provider"
+description: "Bug report template about FEVM/FVM for developers/service providers"
+labels: [need/triage, kind/bug, area/fevm]
+body:
+  - type: checkboxes
+    attributes:
+      label: Checklist
+      description: Please check off the following boxes before continuing to file a bug report!
+      options:
+        - label: This is **not** a security-related bug/issue. If it is, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
+          required: true
+        - label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion.
+          required: true
+        - label: I did not make any code changes to lotus.
+          required: false
+  - type: checkboxes
+    attributes:
+      label: Lotus component
+      description: Please select the lotus component you are filing a bug for
+      options:
+        - label: lotus Ethereum RPC
+          required: false
+        - label: lotus FVM - Lotus FVM interactions
+          required: false
+        - label: FEVM tooling
+          required: false
+        - label: Other
+          required: false
+  - type: textarea
+    id: version
+    attributes:
+      label: Lotus Version
+      render: text
+      description: Enter the output of `lotus version` if applicable.
+      placeholder: |
+        e.g.
+        Daemon: 1.19.0+mainnet+git.64059ca87+api1.5.0
+        Local: lotus-miner version 1.19.0+mainnet+git.64059ca87
+    validations:
+      required: true
+  - type: textarea
+    id: repro
+    attributes:
+      label: Repro Steps
+      description: "Steps to reproduce the behavior"
+      value: |
+        1. Run '...'
+        2. Do '...'
+        3. See error '...'
+        ...
+    validations:
+      required: false
+  - type: textarea
+    id: Description
+    attributes:
+      label: Describe the Bug
+      description: |
+        This is where you get to tell us what went wrong. When doing so, please try to provide a clear and concise description of the bug with all related information:
+        * What were you doing when you experienced the bug? What are you trying to build?
+        * Any *error* messages and logs you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas).
+        * What is the expected behaviour? Links to the actual code?
+    validations:
+      required: true
+  - type: textarea
+    id: toolingInfo
+    attributes:
+      label: Tooling
+      render: text
+      description: |
+        What kind of tooling are you using:
+        * Are you using ethers.js, Alchemy, Hardhat, etc.
+    validations:
+      required: true
+  - type: textarea
+    id: extraInfo
+    attributes:
+      label: Configuration Options
+      render: text
+      description: |
+        Please provide your updated FEVM-related configuration options, or custom environment variables related to Lotus FEVM
+        * lotus: use `lotus config updated` to get your configuration options, and copy the [FEVM] section
+    validations:
+      required: true

View File

@@ -15,6 +15,7 @@ import (
 	apitypes "github.com/filecoin-project/lotus/api/types"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/types/ethtypes"
+	"github.com/filecoin-project/lotus/node/modules/dtypes"
 )

 // MODIFYING THE API INTERFACE

@@ -25,7 +26,7 @@ import (
 // When adding / changing methods in this file:
 // * Do the change here
 // * Adjust implementation in `node/impl/`
-// * Run `make gen` - this will:
+// * Run `make clean && make deps && make gen` - this will:
 //  * Generate proxy structs
 //  * Generate mocks
 //  * Generate markdown docs

@@ -47,15 +48,18 @@ type Gateway interface {
 	ChainReadObj(context.Context, cid.Cid) ([]byte, error)
 	ChainGetGenesis(context.Context) (*types.TipSet, error)
 	GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
+	MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error)
 	MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
 	MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
 	MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error)
 	MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
 	MsigGetVestingSchedule(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MsigVesting, error)
 	StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
+	StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*InvocResult, error)
 	StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
+	StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
 	StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
-	StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) //perm:read
+	StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error)
 	StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
 	StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
 	StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error)

@@ -63,6 +67,7 @@ type Gateway interface {
 	StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (MinerInfo, error)
 	StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
 	StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
+	StateNetworkName(context.Context) (dtypes.NetworkName, error)
 	StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error)
 	StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
 	StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
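The change above is easiest to read from the caller's side: the gateway now exposes enough surface (nonce lookup, read-only calls, parameter decoding, network name) for Eth-style clients to work against it. Below is a minimal, hypothetical sketch of a client exercising two of the new methods; the `NewGatewayRPCV1` helper, the endpoint URL, and the example address are assumptions for illustration, not part of this diff:

```go
package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()

	// hypothetical gateway endpoint; substitute your own
	gw, closer, err := client.NewGatewayRPCV1(ctx, "https://api.node.glif.io/rpc/v1", nil)
	if err != nil {
		panic(err)
	}
	defer closer()

	// StateNetworkName is now callable through the gateway
	name, err := gw.StateNetworkName(ctx)
	fmt.Println("network:", name, err)

	// MpoolGetNonce is what Eth transaction submission needs to pick the next nonce
	addr, _ := address.NewFromString("f01234") // example ID address
	nonce, err := gw.MpoolGetNonce(ctx, addr)
	fmt.Println("nonce:", nonce, err)
}
```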

View File

@@ -401,6 +401,10 @@ func init() {
 		FromBlock: pstring("2301220"),
 		Address:   []ethtypes.EthAddress{ethaddr},
 	})
+
+	percent := types.Percent(123)
+	addExample(percent)
+	addExample(&percent)
 }

 func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {

View File

@@ -720,6 +720,8 @@ type GatewayMethods struct {
 	GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``

+	MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) ``
+
 	MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) ``

 	MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) ``

@@ -736,8 +738,12 @@ type GatewayMethods struct {
 	StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``

+	StateCall func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*InvocResult, error) ``
+
 	StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) ``

+	StateDecodeParams func(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) ``
+
 	StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) ``

 	StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) ``

@@ -754,9 +760,11 @@ type GatewayMethods struct {
 	StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) ``

+	StateNetworkName func(p0 context.Context) (dtypes.NetworkName, error) ``
+
 	StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) ``

-	StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) `perm:"read"`
+	StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) ``

 	StateSearchMsg func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) ``
@@ -4563,6 +4571,17 @@ func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Messag
 	return nil, ErrNotSupported
 }

+func (s *GatewayStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
+	if s.Internal.MpoolGetNonce == nil {
+		return 0, ErrNotSupported
+	}
+	return s.Internal.MpoolGetNonce(p0, p1)
+}
+
+func (s *GatewayStub) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
+	return 0, ErrNotSupported
+}
+
 func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
 	if s.Internal.MpoolPush == nil {
 		return *new(cid.Cid), ErrNotSupported

@@ -4651,6 +4670,17 @@ func (s *GatewayStub) StateAccountKey(p0 context.Context, p1 address.Address, p2
 	return *new(address.Address), ErrNotSupported
 }

+func (s *GatewayStruct) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*InvocResult, error) {
+	if s.Internal.StateCall == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.StateCall(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*InvocResult, error) {
+	return nil, ErrNotSupported
+}
+
 func (s *GatewayStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) {
 	if s.Internal.StateDealProviderCollateralBounds == nil {
 		return *new(DealCollateralBounds), ErrNotSupported

@@ -4662,6 +4692,17 @@ func (s *GatewayStub) StateDealProviderCollateralBounds(p0 context.Context, p1 a
 	return *new(DealCollateralBounds), ErrNotSupported
 }

+func (s *GatewayStruct) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) {
+	if s.Internal.StateDecodeParams == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.StateDecodeParams(p0, p1, p2, p3, p4)
+}
+
+func (s *GatewayStub) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) {
+	return nil, ErrNotSupported
+}
+
 func (s *GatewayStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) {
 	if s.Internal.StateGetActor == nil {
 		return nil, ErrNotSupported

@@ -4750,6 +4791,17 @@ func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.A
 	return nil, ErrNotSupported
 }

+func (s *GatewayStruct) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
+	if s.Internal.StateNetworkName == nil {
+		return *new(dtypes.NetworkName), ErrNotSupported
+	}
+	return s.Internal.StateNetworkName(p0)
+}
+
+func (s *GatewayStub) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
+	return *new(dtypes.NetworkName), ErrNotSupported
+}
+
 func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) {
 	if s.Internal.StateNetworkVersion == nil {
 		return *new(apitypes.NetworkVersion), ErrNotSupported
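A note on the generated boilerplate above: `GatewayStruct` dispatches every call through a function-valued field on its exported `Internal` struct and falls back to `ErrNotSupported` when that field is nil, which makes partial fakes cheap to build. A small illustrative sketch (the helper name is ours, not from this diff):

```go
package gatewaytest

import (
	"context"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
)

// newPartialGateway stubs only MpoolGetNonce; any other method on the
// returned Gateway still fails cleanly with api.ErrNotSupported because
// its Internal field is left nil.
func newPartialGateway() api.Gateway {
	gw := &api.GatewayStruct{}
	gw.Internal.MpoolGetNonce = func(ctx context.Context, addr address.Address) (uint64, error) {
		return 42, nil // canned nonce for tests
	}
	return gw
}
```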

View File

@@ -14,6 +14,7 @@ import (
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/node/modules/dtypes"
 )

 // MODIFYING THE API INTERFACE

@@ -44,12 +45,15 @@ type Gateway interface {
 	ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
 	ChainReadObj(context.Context, cid.Cid) ([]byte, error)
 	GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
+	MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error)
 	MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
 	MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
 	MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
 	MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error)
 	StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
+	StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error)
 	StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
+	StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
 	StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
 	StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
 	StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)

@@ -59,6 +63,7 @@ type Gateway interface {
 	StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (api.MinerInfo, error)
 	StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
 	StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
+	StateNetworkName(context.Context) (dtypes.NetworkName, error)
 	StateNetworkVersion(context.Context, types.TipSetKey) (abinetwork.Version, error)
 	StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error)
 	StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)

View File

@@ -451,6 +451,8 @@ type GatewayMethods struct {
 	GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``

+	MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) ``
+
 	MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) ``

 	MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) ``

@@ -461,8 +463,12 @@ type GatewayMethods struct {
 	StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``

+	StateCall func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) ``
+
 	StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) ``

+	StateDecodeParams func(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) ``
+
 	StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) ``

 	StateGetReceipt func(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) ``

@@ -481,6 +487,8 @@ type GatewayMethods struct {
 	StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) ``

+	StateNetworkName func(p0 context.Context) (dtypes.NetworkName, error) ``
+
 	StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) ``

 	StateSearchMsg func(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) ``

@@ -2677,6 +2685,17 @@ func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Messag
 	return nil, ErrNotSupported
 }

+func (s *GatewayStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
+	if s.Internal.MpoolGetNonce == nil {
+		return 0, ErrNotSupported
+	}
+	return s.Internal.MpoolGetNonce(p0, p1)
+}
+
+func (s *GatewayStub) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
+	return 0, ErrNotSupported
+}
+
 func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
 	if s.Internal.MpoolPush == nil {
 		return *new(cid.Cid), ErrNotSupported

@@ -2732,6 +2751,17 @@ func (s *GatewayStub) StateAccountKey(p0 context.Context, p1 address.Address, p2
 	return *new(address.Address), ErrNotSupported
 }

+func (s *GatewayStruct) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) {
+	if s.Internal.StateCall == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.StateCall(p0, p1, p2)
+}
+
+func (s *GatewayStub) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) {
+	return nil, ErrNotSupported
+}
+
 func (s *GatewayStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) {
 	if s.Internal.StateDealProviderCollateralBounds == nil {
 		return *new(api.DealCollateralBounds), ErrNotSupported

@@ -2743,6 +2773,17 @@ func (s *GatewayStub) StateDealProviderCollateralBounds(p0 context.Context, p1 a
 	return *new(api.DealCollateralBounds), ErrNotSupported
 }

+func (s *GatewayStruct) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) {
+	if s.Internal.StateDecodeParams == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.StateDecodeParams(p0, p1, p2, p3, p4)
+}
+
+func (s *GatewayStub) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) {
+	return nil, ErrNotSupported
+}
+
 func (s *GatewayStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) {
 	if s.Internal.StateGetActor == nil {
 		return nil, ErrNotSupported

@@ -2842,6 +2883,17 @@ func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.A
 	return nil, ErrNotSupported
 }

+func (s *GatewayStruct) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
+	if s.Internal.StateNetworkName == nil {
+		return *new(dtypes.NetworkName), ErrNotSupported
+	}
+	return s.Internal.StateNetworkName(p0)
+}
+
+func (s *GatewayStub) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
+	return *new(dtypes.NetworkName), ErrNotSupported
+}
+
 func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) {
 	if s.Internal.StateNetworkVersion == nil {
 		return *new(abinetwork.Version), ErrNotSupported

View File

@@ -115,6 +115,23 @@ type Config struct {
 	// A positive value is the number of compactions before a full GC is performed;
 	// a value of 1 will perform full GC in every compaction.
 	HotStoreFullGCFrequency uint64
+
+	// HotstoreMaxSpaceTarget suggests the max allowed space the hotstore can take.
+	// This is not a hard limit; it is possible for the hotstore to exceed the target,
+	// for example if state grows massively between compactions. The splitstore
+	// will make a best effort to avoid overflowing the target and in practice should
+	// never overflow. This field is used when doing GC at the end of a compaction to
+	// adaptively choose moving GC.
+	HotstoreMaxSpaceTarget uint64
+
+	// Moving GC will be triggered when total moving size exceeds
+	// HotstoreMaxSpaceTarget - HotstoreMaxSpaceThreshold.
+	HotstoreMaxSpaceThreshold uint64
+
+	// Safety buffer to prevent moving GC from overflowing disk.
+	// Moving GC will not occur when total moving size exceeds
+	// HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer.
+	HotstoreMaxSpaceSafetyBuffer uint64
 }
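Taken together, the three new fields bound a window in which moving GC is allowed to run. A toy restatement of the rule the comments describe (names and wiring are ours; the real decision lives in the splitstore GC path):

```go
// shouldDoMovingGC sketches the decision described by the config comments:
// trigger moving GC once the projected moving size crosses the threshold
// below the target, but never when it would eat into the safety buffer.
// Assumes threshold and safetyBuffer are both smaller than target.
func shouldDoMovingGC(movingSize, target, threshold, safetyBuffer uint64) bool {
	if target == 0 {
		return false // no space target configured; keep existing behaviour
	}
	overThreshold := movingSize > target-threshold // close enough to the target to act
	overSafety := movingSize > target-safetyBuffer // so big that moving could fill the disk
	return overThreshold && !overSafety
}
```

For example, with a 100 GiB target, a 20 GiB threshold, and a 5 GiB safety buffer, moving GC would run while the projected moving size sits between 80 GiB and 95 GiB.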
 // ChainAccessor allows the Splitstore to access the chain. It will most likely

@@ -165,6 +182,7 @@ type SplitStore struct {
 	compactionIndex int64
 	pruneIndex      int64
+	onlineGCCnt     int64

 	ctx    context.Context
 	cancel func()

@@ -195,6 +213,17 @@ type SplitStore struct {
 	// registered protectors
 	protectors []func(func(cid.Cid) error) error
+
+	// dag sizes measured during latest compaction
+	// logged and used for GC strategy
+	// protected by compaction lock
+	szWalk          int64
+	szProtectedTxns int64
+	szKeys          int64 // approximate, not counting keys protected when entering critical section
+
+	// protected by txnLk
+	szMarkedLiveRefs int64
 }

 var _ bstore.Blockstore = (*SplitStore)(nil)

View File

@@ -95,7 +95,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
 	}
 	defer visitor.Close() //nolint

-	err = s.walkChain(curTs, boundaryEpoch, boundaryEpoch, visitor,
+	size := s.walkChain(curTs, boundaryEpoch, boundaryEpoch, visitor,
 		func(c cid.Cid) error {
 			if isUnitaryObject(c) {
 				return errStopWalk

@@ -133,7 +133,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
 		return err
 	}

-	log.Infow("check done", "cold", *coldCnt, "missing", *missingCnt)
+	log.Infow("check done", "cold", *coldCnt, "missing", *missingCnt, "walk size", size)
 	write("--")
 	write("cold: %d missing: %d", *coldCnt, *missingCnt)
 	write("DONE")

View File

@@ -67,6 +67,7 @@ var (
 const (
 	batchSize = 16384
+	cidKeySize = 128
 )

 func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {

@@ -199,9 +200,11 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
 	log.Debugf("marking %d live refs", len(cids))
 	startMark := time.Now()

+	szMarked := new(int64)
+
 	count := new(int32)
 	visitor := newConcurrentVisitor()
-	walkObject := func(c cid.Cid) error {
+	walkObject := func(c cid.Cid) (int64, error) {
 		return s.walkObjectIncomplete(c, visitor,
 			func(c cid.Cid) error {
 				if isUnitaryObject(c) {

@@ -228,10 +231,12 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
 	// optimize the common case of single put
 	if len(cids) == 1 {
-		if err := walkObject(cids[0]); err != nil {
+		sz, err := walkObject(cids[0])
+		if err != nil {
 			log.Errorf("error marking tipset refs: %s", err)
 		}
 		log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count)
+		atomic.AddInt64(szMarked, sz)
 		return
 	}

@@ -243,9 +248,11 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
 	worker := func() error {
 		for c := range workch {
-			if err := walkObject(c); err != nil {
+			sz, err := walkObject(c)
+			if err != nil {
 				return err
 			}
+			atomic.AddInt64(szMarked, sz)
 		}

 		return nil

@@ -268,7 +275,8 @@ func (s *SplitStore) markLiveRefs(cids []cid.Cid) {
 		log.Errorf("error marking tipset refs: %s", err)
 	}

-	log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count)
+	log.Debugw("marking live refs done", "took", time.Since(startMark), "marked", *count, "size marked", *szMarked)
+	s.szMarkedLiveRefs += atomic.LoadInt64(szMarked)
 }
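This hunk establishes the accounting pattern the rest of the file repeats: every walk now returns the number of bytes it visited, and concurrent workers fold those returns into a shared counter with sync/atomic before the total is read once at the end. A stripped-down, self-contained sketch of just that pattern (illustrative only, not lotus code):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	sizes := []int64{100, 250, 75} // stand-ins for per-object walk sizes
	szMarked := new(int64)

	var wg sync.WaitGroup
	for _, sz := range sizes {
		sz := sz
		wg.Add(1)
		go func() {
			defer wg.Done()
			atomic.AddInt64(szMarked, sz) // safe to call from many workers
		}()
	}
	wg.Wait()

	// single reader after all writers have joined
	fmt.Println("total marked size:", atomic.LoadInt64(szMarked))
}
```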
 // transactionally protect a view

@@ -361,6 +369,7 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
 		log.Infow("protecting transactional references", "refs", len(txnRefs))
 		count := 0
+		sz := new(int64)
 		workch := make(chan cid.Cid, len(txnRefs))
 		startProtect := time.Now()

@@ -393,10 +402,11 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
 		worker := func() error {
 			for c := range workch {
-				err := s.doTxnProtect(c, markSet)
+				szTxn, err := s.doTxnProtect(c, markSet)
 				if err != nil {
 					return xerrors.Errorf("error protecting transactional references to %s: %w", c, err)
 				}
+				atomic.AddInt64(sz, szTxn)
 			}
 			return nil
 		}

@@ -409,16 +419,16 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSet) error {
 		if err := g.Wait(); err != nil {
 			return err
 		}
-
-		log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count)
+		s.szProtectedTxns += atomic.LoadInt64(sz)
+		log.Infow("protecting transactional refs done", "took", time.Since(startProtect), "protected", count, "protected size", sz)
 	}
 }
 // transactionally protect a reference by walking the object and marking.
 // concurrent markings are short circuited by checking the markset.
-func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error {
+func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) (int64, error) {
 	if err := s.checkClosing(); err != nil {
-		return err
+		return 0, err
 	}

 	// Note: cold objects are deleted heaviest first, so the constituents of an object
@@ -509,6 +519,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 		// might be potentially inconsistent; abort compaction and notify the user to intervene.
 		return xerrors.Errorf("checkpoint exists; aborting compaction")
 	}
+	s.clearSizeMeasurements()

 	currentEpoch := curTs.Height()
 	boundaryEpoch := currentEpoch - CompactionBoundary

@@ -598,7 +609,6 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 	}

 	err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, &noopVisitor{}, fHot, fCold)
-
 	if err != nil {
 		return xerrors.Errorf("error marking: %w", err)
 	}

@@ -638,7 +648,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 	defer purgew.Close() //nolint:errcheck

 	// some stats for logging
-	var hotCnt, coldCnt, purgeCnt int
+	var hotCnt, coldCnt, purgeCnt int64
 	err = s.hot.ForEachKey(func(c cid.Cid) error {
 		// was it marked?
 		mark, err := markSet.Has(c)

@@ -690,8 +700,9 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 	log.Infow("cold collection done", "took", time.Since(startCollect))

 	log.Infow("compaction stats", "hot", hotCnt, "cold", coldCnt, "purge", purgeCnt)
-	stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(int64(hotCnt)))
-	stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(int64(coldCnt)))
+	s.szKeys = hotCnt * cidKeySize
+	stats.Record(s.ctx, metrics.SplitstoreCompactionHot.M(hotCnt))
+	stats.Record(s.ctx, metrics.SplitstoreCompactionCold.M(coldCnt))

 	if err := s.checkClosing(); err != nil {
 		return err

@@ -773,8 +784,8 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
 		return xerrors.Errorf("error purging cold objects: %w", err)
 	}
 	log.Infow("purging cold objects from hotstore done", "took", time.Since(startPurge))
-
 	s.endCriticalSection()
+	log.Infow("critical section done", "total protected size", s.szProtectedTxns, "total marked live size", s.szMarkedLiveRefs)

 	if err := checkpoint.Close(); err != nil {
 		log.Warnf("error closing checkpoint: %s", err)
@@ -907,6 +918,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
 	copy(toWalk, ts.Cids())
 	walkCnt := new(int64)
 	scanCnt := new(int64)
+	szWalk := new(int64)

 	tsRef := func(blkCids []cid.Cid) (cid.Cid, error) {
 		return types.NewTipSetKey(blkCids...).Cid()

@@ -942,48 +954,64 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
 		if err != nil {
 			return xerrors.Errorf("error computing cid reference to parent tipset")
 		}
-		if err := s.walkObjectIncomplete(pRef, visitor, fHot, stopWalk); err != nil {
+		sz, err := s.walkObjectIncomplete(pRef, visitor, fHot, stopWalk)
+		if err != nil {
 			return xerrors.Errorf("error walking parent tipset cid reference")
 		}
+		atomic.AddInt64(szWalk, sz)

 		// messages are retained if within the inclMsgs boundary
 		if hdr.Height >= inclMsgs && hdr.Height > 0 {
 			if inclMsgs < inclState {
 				// we need to use walkObjectIncomplete here, as messages/receipts may be missing early on if we
 				// synced from snapshot and have a long HotStoreMessageRetentionPolicy.
-				if err := s.walkObjectIncomplete(hdr.Messages, visitor, fHot, stopWalk); err != nil {
+				sz, err := s.walkObjectIncomplete(hdr.Messages, visitor, fHot, stopWalk)
+				if err != nil {
 					return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
 				}
+				atomic.AddInt64(szWalk, sz)

-				if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk); err != nil {
+				sz, err = s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk)
+				if err != nil {
 					return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
 				}
+				atomic.AddInt64(szWalk, sz)
 			} else {
-				if err := s.walkObject(hdr.Messages, visitor, fHot); err != nil {
+				sz, err = s.walkObject(hdr.Messages, visitor, fHot)
+				if err != nil {
 					return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
 				}
+				atomic.AddInt64(szWalk, sz)

-				if err := s.walkObject(hdr.ParentMessageReceipts, visitor, fHot); err != nil {
+				sz, err := s.walkObject(hdr.ParentMessageReceipts, visitor, fHot)
+				if err != nil {
 					return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
 				}
+				atomic.AddInt64(szWalk, sz)
 			}
 		}

 		// messages and receipts outside of inclMsgs are included in the cold store
 		if hdr.Height < inclMsgs && hdr.Height > 0 {
-			if err := s.walkObjectIncomplete(hdr.Messages, visitor, fCold, stopWalk); err != nil {
+			sz, err := s.walkObjectIncomplete(hdr.Messages, visitor, fCold, stopWalk)
+			if err != nil {
 				return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
 			}
+			atomic.AddInt64(szWalk, sz)
-			if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fCold, stopWalk); err != nil {
+			sz, err = s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fCold, stopWalk)
+			if err != nil {
 				return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
 			}
+			atomic.AddInt64(szWalk, sz)
 		}

 		// state is only retained if within the inclState boundary, with the exception of genesis
 		if hdr.Height >= inclState || hdr.Height == 0 {
-			if err := s.walkObject(hdr.ParentStateRoot, visitor, fHot); err != nil {
+			sz, err := s.walkObject(hdr.ParentStateRoot, visitor, fHot)
+			if err != nil {
 				return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err)
 			}
+			atomic.AddInt64(szWalk, sz)
 			atomic.AddInt64(scanCnt, 1)
 		}

@@ -1001,9 +1029,11 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
 		if err != nil {
 			return xerrors.Errorf("error computing cid reference to parent tipset")
 		}
-		if err := s.walkObjectIncomplete(hRef, visitor, fHot, stopWalk); err != nil {
+		sz, err := s.walkObjectIncomplete(hRef, visitor, fHot, stopWalk)
+		if err != nil {
 			return xerrors.Errorf("error walking parent tipset cid reference")
 		}
+		atomic.AddInt64(szWalk, sz)

 	for len(toWalk) > 0 {
 		// walking can take a while, so check this with every opportunity

@@ -1047,123 +1077,129 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
 		}
 	}

-	log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt)
+	log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt, "walk size", szWalk)
+	s.szWalk = atomic.LoadInt64(szWalk)

 	return nil
 }
func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid) error) error { func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid) error) (int64, error) {
var sz int64
visit, err := visitor.Visit(c) visit, err := visitor.Visit(c)
if err != nil { if err != nil {
return xerrors.Errorf("error visiting object: %w", err) return 0, xerrors.Errorf("error visiting object: %w", err)
} }
if !visit { if !visit {
return nil return sz, nil
} }
if err := f(c); err != nil { if err := f(c); err != nil {
if err == errStopWalk { if err == errStopWalk {
return nil return sz, nil
} }
return err return 0, err
} }
if c.Prefix().Codec != cid.DagCBOR { if c.Prefix().Codec != cid.DagCBOR {
return nil return sz, nil
} }
// check this before recursing // check this before recursing
if err := s.checkClosing(); err != nil { if err := s.checkClosing(); err != nil {
return err return 0, err
} }
var links []cid.Cid var links []cid.Cid
err = s.view(c, func(data []byte) error { err = s.view(c, func(data []byte) error {
sz += int64(len(data))
return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
links = append(links, c) links = append(links, c)
}) })
}) })
if err != nil { if err != nil {
return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err) return 0, xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err)
} }
for _, c := range links { for _, c := range links {
err := s.walkObject(c, visitor, f) szLink, err := s.walkObject(c, visitor, f)
if err != nil { if err != nil {
return xerrors.Errorf("error walking link (cid: %s): %w", c, err) return 0, xerrors.Errorf("error walking link (cid: %s): %w", c, err)
} }
sz += szLink
} }
return nil return sz, nil
} }
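walkObject now threads a byte count up through the recursion: each visited block contributes len(data), and every child's subtotal is folded into its parent's. A minimal standalone sketch of that accumulate-while-recursing shape over a toy in-memory DAG (all names and sizes here are hypothetical, not lotus API):

```go
package main

import "fmt"

// toy DAG: node id -> payload size plus child ids, standing in for block
// bytes and the links cbg.ScanForLinks would discover
type node struct {
	size  int64
	links []string
}

var dag = map[string]node{
	"root": {size: 100, links: []string{"a", "b"}},
	"a":    {size: 40, links: []string{"c"}},
	"b":    {size: 60},
	"c":    {size: 25},
}

// walk mirrors walkObject's shape: visit each block once, count its bytes,
// then fold in the sizes reported by the recursive calls on its links.
func walk(id string, seen map[string]bool) int64 {
	if seen[id] {
		return 0 // already visited; counted on a previous path
	}
	seen[id] = true
	n := dag[id]
	sz := n.size
	for _, l := range n.links {
		sz += walk(l, seen)
	}
	return sz
}

func main() {
	fmt.Println(walk("root", map[string]bool{})) // 225 = 100+40+60+25
}
```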
// like walkObject, but the object may be potentially incomplete (references missing) // like walkObject, but the object may be potentially incomplete (references missing)
func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, missing func(cid.Cid) error) error { func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, missing func(cid.Cid) error) (int64, error) {
var sz int64
visit, err := visitor.Visit(c) visit, err := visitor.Visit(c)
if err != nil { if err != nil {
return xerrors.Errorf("error visiting object: %w", err) return 0, xerrors.Errorf("error visiting object: %w", err)
} }
if !visit { if !visit {
return nil return sz, nil
} }
// occurs check -- only for DAGs // occurs check -- only for DAGs
if c.Prefix().Codec == cid.DagCBOR { if c.Prefix().Codec == cid.DagCBOR {
has, err := s.has(c) has, err := s.has(c)
if err != nil { if err != nil {
return xerrors.Errorf("error occur checking %s: %w", c, err) return 0, xerrors.Errorf("error occur checking %s: %w", c, err)
} }
if !has { if !has {
err = missing(c) err = missing(c)
if err == errStopWalk { if err == errStopWalk {
return nil return sz, nil
} }
return err return 0, err
} }
} }
if err := f(c); err != nil { if err := f(c); err != nil {
if err == errStopWalk { if err == errStopWalk {
return nil return sz, nil
} }
return err return 0, err
} }
if c.Prefix().Codec != cid.DagCBOR { if c.Prefix().Codec != cid.DagCBOR {
return nil return sz, nil
} }
// check this before recursing // check this before recursing
if err := s.checkClosing(); err != nil { if err := s.checkClosing(); err != nil {
return err return sz, err
} }
var links []cid.Cid var links []cid.Cid
err = s.view(c, func(data []byte) error { err = s.view(c, func(data []byte) error {
sz += int64(len(data))
return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
links = append(links, c) links = append(links, c)
}) })
}) })
if err != nil { if err != nil {
return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err) return 0, xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err)
} }
for _, c := range links { for _, c := range links {
err := s.walkObjectIncomplete(c, visitor, f, missing) szLink, err := s.walkObjectIncomplete(c, visitor, f, missing)
if err != nil { if err != nil {
return xerrors.Errorf("error walking link (cid: %s): %w", c, err) return 0, xerrors.Errorf("error walking link (cid: %s): %w", c, err)
} }
sz += szLink
} }
return nil return sz, nil
} }
// internal version used during compaction and related operations // internal version used during compaction and related operations
@ -1429,8 +1465,9 @@ func (s *SplitStore) completeCompaction() error {
} }
s.compactType = none s.compactType = none
// Note: at this point we can start the splitstore; base epoch is not
// incremented here so a compaction should run on the first head
// change, which will trigger gc on the hotstore.
// We don't mind the second (back-to-back) compaction as the head will // We don't mind the second (back-to-back) compaction as the head will
// have advanced during marking and coldset accumulation. // have advanced during marking and coldset accumulation.
return nil return nil
@ -1488,6 +1525,13 @@ func (s *SplitStore) completePurge(coldr *ColdSetReader, checkpoint *Checkpoint,
return nil return nil
} }
func (s *SplitStore) clearSizeMeasurements() {
s.szKeys = 0
s.szMarkedLiveRefs = 0
s.szProtectedTxns = 0
s.szWalk = 0
}
// I really don't like having this code, but we seem to have some occasional DAG references with // I really don't like having this code, but we seem to have some occasional DAG references with
// missing constituents. During testing in mainnet *some* of these references *sometimes* appeared // missing constituents. During testing in mainnet *some* of these references *sometimes* appeared
// after a little bit. // after a little bit.
@ -1528,7 +1572,7 @@ func (s *SplitStore) waitForMissingRefs(markSet MarkSet) {
missing = make(map[cid.Cid]struct{}) missing = make(map[cid.Cid]struct{})
for c := range towalk { for c := range towalk {
err := s.walkObjectIncomplete(c, visitor, _, err := s.walkObjectIncomplete(c, visitor,
func(c cid.Cid) error { func(c cid.Cid) error {
if isUnitaryObject(c) { if isUnitaryObject(c) {
return errStopWalk return errStopWalk


@ -7,15 +7,61 @@ import (
bstore "github.com/filecoin-project/lotus/blockstore" bstore "github.com/filecoin-project/lotus/blockstore"
) )
const (
// Fraction of garbage in badger vlog for online GC traversal to collect garbage
AggressiveOnlineGCThreshold = 0.0001
)
func (s *SplitStore) gcHotAfterCompaction() { func (s *SplitStore) gcHotAfterCompaction() {
// Measure hotstore size, determine if we should do full GC, determine if we can do full GC.
// We should do full GC if
// FullGCFrequency is specified and compaction index matches frequency
// OR HotstoreMaxSpaceTarget is specified and total moving space is within 150 GB of target
// We can do full GC if
// HotstoreMaxSpaceTarget is not specified
// OR the projected size after the move would stay at least 50 GB (the safety buffer) below the target
//
// a) If we should not do full GC => online GC
// b) If we should do full GC and can => moving GC
// c) If we should do full GC and can't => aggressive online GC
getSize := func() int64 {
sizer, ok := s.hot.(bstore.BlockstoreSize)
if ok {
size, err := sizer.Size()
if err != nil {
log.Warnf("error getting hotstore size: %s, estimating empty hot store for targeting", err)
return 0
}
return size
}
log.Errorf("Could not measure hotstore size, assuming it is 0 bytes, which it is not")
return 0
}
hotSize := getSize()
copySizeApprox := s.szKeys + s.szMarkedLiveRefs + s.szProtectedTxns + s.szWalk
shouldTarget := s.cfg.HotstoreMaxSpaceTarget > 0 && hotSize+copySizeApprox > int64(s.cfg.HotstoreMaxSpaceTarget)-int64(s.cfg.HotstoreMaxSpaceThreshold)
shouldFreq := s.cfg.HotStoreFullGCFrequency > 0 && s.compactionIndex%int64(s.cfg.HotStoreFullGCFrequency) == 0
shouldDoFull := shouldTarget || shouldFreq
canDoFull := s.cfg.HotstoreMaxSpaceTarget == 0 || hotSize+copySizeApprox < int64(s.cfg.HotstoreMaxSpaceTarget)-int64(s.cfg.HotstoreMaxSpaceSafetyBuffer)
log.Debugw("approximating new hot store size", "key size", s.szKeys, "marked live refs", s.szMarkedLiveRefs, "protected txns", s.szProtectedTxns, "walked DAG", s.szWalk)
log.Infof("measured hot store size: %d, approximate new size: %d, should do full %t, can do full %t", hotSize, copySizeApprox, shouldDoFull, canDoFull)
var opts []bstore.BlockstoreGCOption var opts []bstore.BlockstoreGCOption
if s.cfg.HotStoreFullGCFrequency > 0 && s.compactionIndex%int64(s.cfg.HotStoreFullGCFrequency) == 0 { if shouldDoFull && canDoFull {
opts = append(opts, bstore.WithFullGC(true)) opts = append(opts, bstore.WithFullGC(true))
} else if shouldDoFull && !canDoFull {
log.Warnf("Attention! Estimated moving GC size %d is not within safety buffer %d of target max %d, performing aggressive online GC to attempt to bring hotstore size down safely", copySizeApprox, s.cfg.HotstoreMaxSpaceSafetyBuffer, s.cfg.HotstoreMaxSpaceTarget)
log.Warn("If problem continues you can 1) temporarily allocate more disk space to hotstore and 2) reflect in HotstoreMaxSpaceTarget OR trigger manual move with `lotus chain prune hot-moving`")
log.Warn("If problem continues and you do not have any more disk space you can run continue to manually trigger online GC at aggressive thresholds (< 0.01) with `lotus chain prune hot`")
opts = append(opts, bstore.WithThreshold(AggressiveOnlineGCThreshold))
} }
if err := s.gcBlockstore(s.hot, opts); err != nil { if err := s.gcBlockstore(s.hot, opts); err != nil {
log.Warnf("error garbage collecting hostore: %s", err) log.Warnf("error garbage collecting hostore: %s", err)
} }
log.Infof("measured hot store size after GC: %d", getSize())
} }
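To make the three-way decision concrete, here is a self-contained sketch of the same arithmetic with made-up numbers (the frequency trigger is omitted; the variable names mirror the fields above but this is not lotus code):

```go
package main

import "fmt"

func main() {
	// hypothetical measurements, in bytes
	hotSize := int64(500 << 30)        // 500 GiB currently on disk
	copySizeApprox := int64(120 << 30) // szKeys + szMarkedLiveRefs + szProtectedTxns + szWalk

	// hypothetical config: 650 GiB target, 150 GiB threshold, 50 GiB safety buffer
	target, threshold, buffer := int64(650<<30), int64(150<<30), int64(50<<30)

	shouldDoFull := target > 0 && hotSize+copySizeApprox > target-threshold
	canDoFull := target == 0 || hotSize+copySizeApprox < target-buffer

	switch {
	case shouldDoFull && canDoFull:
		fmt.Println("moving (full) GC")
	case shouldDoFull && !canDoFull:
		fmt.Println("aggressive online GC") // a full move could overshoot the target
	default:
		fmt.Println("regular online GC")
	}
	// here the 620 GiB projection exceeds the 500 GiB trigger but not the
	// 600 GiB ceiling, so this run would pick aggressive online GC
}
```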
func (s *SplitStore) gcBlockstore(b bstore.Blockstore, opts []bstore.BlockstoreGCOption) error { func (s *SplitStore) gcBlockstore(b bstore.Blockstore, opts []bstore.BlockstoreGCOption) error {


@ -101,7 +101,7 @@ func (s *SplitStore) doReify(c cid.Cid) {
defer s.txnLk.RUnlock() defer s.txnLk.RUnlock()
count := 0 count := 0
err := s.walkObjectIncomplete(c, newTmpVisitor(), _, err := s.walkObjectIncomplete(c, newTmpVisitor(),
func(c cid.Cid) error { func(c cid.Cid) error {
if isUnitaryObject(c) { if isUnitaryObject(c) {
return errStopWalk return errStopWalk

Binary files not shown (4 files).


@ -256,7 +256,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS
//return nil, xerrors.Errorf("creating drand beacon: %w", err) //return nil, xerrors.Errorf("creating drand beacon: %w", err)
//} //}
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac) sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds)
if err != nil { if err != nil {
return nil, xerrors.Errorf("initing stmgr: %w", err) return nil, xerrors.Errorf("initing stmgr: %w", err)
} }


@ -13,7 +13,11 @@ import (
) )
var ( var (
ReplaceByFeeRatioDefault = 1.25 ReplaceByFeePercentageMinimum types.Percent = 110
ReplaceByFeePercentageDefault types.Percent = 125
)
var (
MemPoolSizeLimitHiDefault = 30000 MemPoolSizeLimitHiDefault = 30000
MemPoolSizeLimitLoDefault = 20000 MemPoolSizeLimitLoDefault = 20000
PruneCooldownDefault = time.Minute PruneCooldownDefault = time.Minute
@ -60,9 +64,9 @@ func (mp *MessagePool) getConfig() *types.MpoolConfig {
} }
func validateConfg(cfg *types.MpoolConfig) error { func validateConfg(cfg *types.MpoolConfig) error {
if cfg.ReplaceByFeeRatio < ReplaceByFeeRatioDefault { if cfg.ReplaceByFeeRatio < ReplaceByFeePercentageMinimum {
return fmt.Errorf("'ReplaceByFeeRatio' is less than required %f < %f", return fmt.Errorf("'ReplaceByFeeRatio' is less than required %s < %s",
cfg.ReplaceByFeeRatio, ReplaceByFeeRatioDefault) cfg.ReplaceByFeeRatio, ReplaceByFeePercentageMinimum)
} }
if cfg.GasLimitOverestimation < 1 { if cfg.GasLimitOverestimation < 1 {
return fmt.Errorf("'GasLimitOverestimation' cannot be less than 1") return fmt.Errorf("'GasLimitOverestimation' cannot be less than 1")
@ -91,7 +95,7 @@ func DefaultConfig() *types.MpoolConfig {
return &types.MpoolConfig{ return &types.MpoolConfig{
SizeLimitHigh: MemPoolSizeLimitHiDefault, SizeLimitHigh: MemPoolSizeLimitHiDefault,
SizeLimitLow: MemPoolSizeLimitLoDefault, SizeLimitLow: MemPoolSizeLimitLoDefault,
ReplaceByFeeRatio: ReplaceByFeeRatioDefault, ReplaceByFeeRatio: ReplaceByFeePercentageDefault,
PruneCooldown: PruneCooldownDefault, PruneCooldown: PruneCooldownDefault,
GasLimitOverestimation: GasLimitOverestimation, GasLimitOverestimation: GasLimitOverestimation,
} }


@ -47,10 +47,8 @@ var log = logging.Logger("messagepool")
var futureDebug = false var futureDebug = false
var rbfNumBig = types.NewInt(uint64((ReplaceByFeeRatioDefault - 1) * RbfDenom)) var rbfNumBig = types.NewInt(uint64(ReplaceByFeePercentageMinimum))
var rbfDenomBig = types.NewInt(RbfDenom) var rbfDenomBig = types.NewInt(100)
const RbfDenom = 256
var RepublishInterval = time.Duration(10*build.BlockDelaySecs+build.PropagationDelaySecs) * time.Second var RepublishInterval = time.Duration(10*build.BlockDelaySecs+build.PropagationDelaySecs) * time.Second
@ -197,7 +195,13 @@ func newMsgSet(nonce uint64) *msgSet {
} }
func ComputeMinRBF(curPrem abi.TokenAmount) abi.TokenAmount { func ComputeMinRBF(curPrem abi.TokenAmount) abi.TokenAmount {
minPrice := types.BigAdd(curPrem, types.BigDiv(types.BigMul(curPrem, rbfNumBig), rbfDenomBig)) minPrice := types.BigDiv(types.BigMul(curPrem, rbfNumBig), rbfDenomBig)
return types.BigAdd(minPrice, types.NewInt(1))
}
func ComputeRBF(curPrem abi.TokenAmount, replaceByFeeRatio types.Percent) abi.TokenAmount {
rbfNumBig := types.NewInt(uint64(replaceByFeeRatio))
minPrice := types.BigDiv(types.BigMul(curPrem, rbfNumBig), rbfDenomBig)
return types.BigAdd(minPrice, types.NewInt(1)) return types.BigAdd(minPrice, types.NewInt(1))
} }
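A quick check of the new percentage arithmetic with plain integers (the real functions operate on types.BigInt, so this sketch only illustrates the formula):

```go
package main

import "fmt"

// minReplacementPremium mirrors ComputeRBF's formula: premium * pct / 100,
// plus one attoFIL so the replacement strictly beats the minimum.
func minReplacementPremium(curPrem, pct int64) int64 {
	return curPrem*pct/100 + 1
}

func main() {
	fmt.Println(minReplacementPremium(1000, 125)) // 1251 with ReplaceByFeePercentageDefault
	fmt.Println(minReplacementPremium(1000, 110)) // 1101 at ReplaceByFeePercentageMinimum
}
```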


@ -8,6 +8,7 @@ import (
"go.opencensus.io/trace" "go.opencensus.io/trace"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )
@ -52,6 +53,12 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
sm.stlk.Unlock() sm.stlk.Unlock()
// First, try to find the tipset in the current chain. If found, we can avoid re-executing
// it.
if st, rec, found := tryLookupTipsetState(ctx, sm.cs, ts); found {
return st, rec, nil
}
st, rec, err = sm.tsExec.ExecuteTipSet(ctx, sm, ts, sm.tsExecMonitor, false) st, rec, err = sm.tsExec.ExecuteTipSet(ctx, sm, ts, sm.tsExecMonitor, false)
if err != nil { if err != nil {
return cid.Undef, cid.Undef, err return cid.Undef, cid.Undef, err
@ -60,6 +67,51 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
return st, rec, nil return st, rec, nil
} }
// Try to lookup a state & receipt CID for a given tipset by walking the chain instead of executing
// it. This will only successfully return the state/receipt CIDs if they're found in the state
// store.
//
// NOTE: This _won't_ recursively walk the receipt/state trees. It assumes that having the root
// implies having the rest of the tree. However, lotus generally makes that assumption anyway.
func tryLookupTipsetState(ctx context.Context, cs *store.ChainStore, ts *types.TipSet) (cid.Cid, cid.Cid, bool) {
nextTs, err := cs.GetTipsetByHeight(ctx, ts.Height()+1, nil, false)
if err != nil {
// Nothing to see here. The requested height may be beyond the current head.
return cid.Undef, cid.Undef, false
}
// Make sure we're on the correct fork.
if nextTs.Parents() != ts.Key() {
// Also nothing to see here. This just means that the requested tipset is on a
// different fork.
return cid.Undef, cid.Undef, false
}
stateCid := nextTs.ParentState()
receiptCid := nextTs.ParentMessageReceipts()
// Make sure we have the parent state.
if hasState, err := cs.StateBlockstore().Has(ctx, stateCid); err != nil {
log.Errorw("failed to lookup state-root in blockstore", "cid", stateCid, "error", err)
return cid.Undef, cid.Undef, false
} else if !hasState {
// We have the chain but don't have the state. It looks like we need to try
// executing?
return cid.Undef, cid.Undef, false
}
// Make sure we have the receipts.
if hasReceipts, err := cs.ChainBlockstore().Has(ctx, receiptCid); err != nil {
log.Errorw("failed to lookup receipts in blockstore", "cid", receiptCid, "error", err)
return cid.Undef, cid.Undef, false
} else if !hasReceipts {
// If we don't have the receipts, re-execute and try again.
return cid.Undef, cid.Undef, false
}
return stateCid, receiptCid, true
}
func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, error) { func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, error) {
st, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, ts, em, true) st, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, ts, em, true)
return st, err return st, err


@ -174,9 +174,16 @@ func (us UpgradeSchedule) GetNtwkVersion(e abi.ChainEpoch) (network.Version, err
func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) { func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) {
retCid := root retCid := root
var err error
u := sm.stateMigrations[height] u := sm.stateMigrations[height]
if u != nil && u.upgrade != nil { if u != nil && u.upgrade != nil {
migCid, ok, err := u.migrationResultCache.Get(ctx, root)
if err == nil && ok {
log.Infow("CACHED migration", "height", height, "from", root, "to", migCid)
return migCid, nil
} else if err != nil {
log.Errorw("failed to lookup previous migration result", "err", err)
}
startTime := time.Now() startTime := time.Now()
log.Warnw("STARTING migration", "height", height, "from", root) log.Warnw("STARTING migration", "height", height, "from", root)
// Yes, we clone the cache, even for the final upgrade epoch. Why? Reverts. We may // Yes, we clone the cache, even for the final upgrade epoch. Why? Reverts. We may
@ -197,6 +204,11 @@ func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, heig
"to", retCid, "to", retCid,
"duration", time.Since(startTime), "duration", time.Since(startTime),
) )
// Only set if the migration ran; we do not want a root => root mapping
if err := u.migrationResultCache.Store(ctx, root, retCid); err != nil {
log.Errorw("failed to store migration result", "err", err)
}
} }
return retCid, nil return retCid, nil


@ -10,6 +10,7 @@ import (
"testing" "testing"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
ipldcbor "github.com/ipfs/go-ipld-cbor" ipldcbor "github.com/ipfs/go-ipld-cbor"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -35,6 +36,7 @@ import (
"github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/stmgr"
. "github.com/filecoin-project/lotus/chain/stmgr" . "github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/chain/vm"
@ -166,7 +168,7 @@ func TestForkHeightTriggers(t *testing.T) {
} }
return st.Flush(ctx) return st.Flush(ctx)
}}}, cg.BeaconSchedule()) }}}, cg.BeaconSchedule(), datastore.NewMapDatastore())
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -284,7 +286,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
migrationCount++ migrationCount++
return root, nil return root, nil
}}}, cg.BeaconSchedule()) }}}, cg.BeaconSchedule(), datastore.NewMapDatastore())
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -502,7 +504,7 @@ func TestForkPreMigration(t *testing.T) {
return nil return nil
}, },
}}}, }}},
}, cg.BeaconSchedule()) }, cg.BeaconSchedule(), datastore.NewMapDatastore())
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -576,6 +578,7 @@ func TestDisablePreMigration(t *testing.T) {
}}}, }}},
}, },
cg.BeaconSchedule(), cg.BeaconSchedule(),
datastore.NewMapDatastore(),
) )
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, sm.Start(context.Background())) require.NoError(t, sm.Start(context.Background()))
@ -603,3 +606,102 @@ func TestDisablePreMigration(t *testing.T) {
require.Equal(t, 1, len(counter)) require.Equal(t, 1, len(counter))
} }
func TestMigrationCache(t *testing.T) {
logging.SetAllLoggers(logging.LevelInfo)
cg, err := gen.NewGenerator()
require.NoError(t, err)
counter := make(chan struct{}, 10)
metadataDs := datastore.NewMapDatastore()
sm, err := NewStateManager(
cg.ChainStore(),
consensus.NewTipSetExecutor(filcns.RewardFunc),
cg.StateManager().VMSys(),
UpgradeSchedule{{
Network: network.Version1,
Height: testForkHeight,
Migration: func(_ context.Context, _ *StateManager, _ MigrationCache, _ ExecMonitor,
root cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) {
counter <- struct{}{}
return root, nil
}},
},
cg.BeaconSchedule(),
metadataDs,
)
require.NoError(t, err)
require.NoError(t, sm.Start(context.Background()))
defer func() {
require.NoError(t, sm.Stop(context.Background()))
}()
inv := consensus.NewActorRegistry()
registry := builtin.MakeRegistryLegacy([]rtt.VMActor{testActor{}})
inv.Register(actorstypes.Version0, nil, registry)
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {
nvm, err := vm.NewLegacyVM(ctx, vmopt)
require.NoError(t, err)
nvm.SetInvoker(inv)
return nvm, nil
})
cg.SetStateManager(sm)
for i := 0; i < 50; i++ {
_, err := cg.NextTipSet()
require.NoError(t, err)
}
ts, err := cg.ChainStore().GetTipsetByHeight(context.Background(), testForkHeight, nil, false)
require.NoError(t, err)
root, _, err := stmgr.ComputeState(context.Background(), sm, testForkHeight+1, []*types.Message{}, ts)
require.NoError(t, err)
t.Log(root)
require.Equal(t, 1, len(counter))
{
sm, err := NewStateManager(
cg.ChainStore(),
consensus.NewTipSetExecutor(filcns.RewardFunc),
cg.StateManager().VMSys(),
UpgradeSchedule{{
Network: network.Version1,
Height: testForkHeight,
Migration: func(_ context.Context, _ *StateManager, _ MigrationCache, _ ExecMonitor,
root cid.Cid, _ abi.ChainEpoch, _ *types.TipSet) (cid.Cid, error) {
counter <- struct{}{}
return root, nil
}},
},
cg.BeaconSchedule(),
metadataDs,
)
require.NoError(t, err)
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {
nvm, err := vm.NewLegacyVM(ctx, vmopt)
require.NoError(t, err)
nvm.SetInvoker(inv)
return nvm, nil
})
ctx := context.Background()
base, _, err := sm.ExecutionTrace(ctx, ts)
require.NoError(t, err)
_, err = sm.HandleStateForks(context.Background(), base, ts.Height(), nil, ts)
require.NoError(t, err)
// Should not have increased as we should be using the cached results in the metadataDs
require.Equal(t, 1, len(counter))
}
}


@ -2,10 +2,13 @@ package stmgr
import ( import (
"context" "context"
"fmt"
"sync" "sync"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
dstore "github.com/ipfs/go-datastore"
cbor "github.com/ipfs/go-ipld-cbor" cbor "github.com/ipfs/go-ipld-cbor"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
"golang.org/x/xerrors" "golang.org/x/xerrors"
@ -54,6 +57,44 @@ type migration struct {
upgrade MigrationFunc upgrade MigrationFunc
preMigrations []PreMigration preMigrations []PreMigration
cache *nv16.MemMigrationCache cache *nv16.MemMigrationCache
migrationResultCache *migrationResultCache
}
type migrationResultCache struct {
ds dstore.Batching
keyPrefix string
}
func (m *migrationResultCache) keyForMigration(root cid.Cid) dstore.Key {
kStr := fmt.Sprintf("%s/%s", m.keyPrefix, root)
return dstore.NewKey(kStr)
}
func (m *migrationResultCache) Get(ctx context.Context, root cid.Cid) (cid.Cid, bool, error) {
k := m.keyForMigration(root)
bs, err := m.ds.Get(ctx, k)
if ipld.IsNotFound(err) {
return cid.Undef, false, nil
} else if err != nil {
return cid.Undef, false, xerrors.Errorf("error loading migration result: %w", err)
}
c, err := cid.Parse(bs)
if err != nil {
return cid.Undef, false, xerrors.Errorf("error parsing migration result: %w", err)
}
return c, true, nil
}
func (m *migrationResultCache) Store(ctx context.Context, root cid.Cid, resultCid cid.Cid) error {
k := m.keyForMigration(root)
if err := m.ds.Put(ctx, k, resultCid.Bytes()); err != nil {
return err
}
return nil
} }
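A standalone sketch of the cache's key scheme and round-trip against an in-memory datastore; the network version and root CID are fabricated for illustration and error handling is trimmed:

```go
package main

import (
	"context"
	"fmt"

	"github.com/ipfs/go-cid"
	dstore "github.com/ipfs/go-datastore"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	ctx := context.Background()
	ds := dstore.NewMapDatastore()

	// fabricate a pre-migration state root and pretend the migration
	// produced it unchanged
	h, _ := mh.Sum([]byte("pre-migration state root"), mh.SHA2_256, -1)
	root := cid.NewCidV1(cid.DagCBOR, h)
	result := root

	// same layout as keyForMigration: /migration-cache/nv<version>/<root>
	k := dstore.NewKey(fmt.Sprintf("/migration-cache/nv%d/%s", 18, root))

	_ = ds.Put(ctx, k, result.Bytes())

	if bs, err := ds.Get(ctx, k); err == nil {
		c, _ := cid.Parse(bs)
		fmt.Println("cached migration result:", c)
	}
}
```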
type Executor interface { type Executor interface {
@ -103,7 +144,7 @@ type treeCache struct {
tree *state.StateTree tree *state.StateTree
} }
func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule) (*StateManager, error) { func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, metadataDs dstore.Batching) (*StateManager, error) {
// If we have upgrades, make sure they're in-order and make sense. // If we have upgrades, make sure they're in-order and make sense.
if err := us.Validate(); err != nil { if err := us.Validate(); err != nil {
return nil, err return nil, err
@ -122,12 +163,18 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
upgrade: upgrade.Migration, upgrade: upgrade.Migration,
preMigrations: upgrade.PreMigrations, preMigrations: upgrade.PreMigrations,
cache: nv16.NewMemMigrationCache(), cache: nv16.NewMemMigrationCache(),
migrationResultCache: &migrationResultCache{
keyPrefix: fmt.Sprintf("/migration-cache/nv%d", upgrade.Network),
ds: metadataDs,
},
} }
stateMigrations[upgrade.Height] = migration stateMigrations[upgrade.Height] = migration
} }
if upgrade.Expensive { if upgrade.Expensive {
expensiveUpgrades[upgrade.Height] = struct{}{} expensiveUpgrades[upgrade.Height] = struct{}{}
} }
networkVersions = append(networkVersions, versionSpec{ networkVersions = append(networkVersions, versionSpec{
networkVersion: lastVersion, networkVersion: lastVersion,
atOrBelow: upgrade.Height, atOrBelow: upgrade.Height,
@ -155,8 +202,8 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
}, nil }, nil
} }
func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor) (*StateManager, error) { func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching) (*StateManager, error) {
sm, err := NewStateManager(cs, exec, sys, us, b) sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -4,8 +4,8 @@ import (
"context" "context"
"os" "os"
"strconv" "strconv"
"sync"
lru "github.com/hashicorp/golang-lru"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
@ -13,7 +13,7 @@ import (
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )
var DefaultChainIndexCacheSize = 32 << 10 var DefaultChainIndexCacheSize = 32 << 15
func init() { func init() {
if s := os.Getenv("LOTUS_CHAIN_INDEX_CACHE"); s != "" { if s := os.Getenv("LOTUS_CHAIN_INDEX_CACHE"); s != "" {
@ -27,7 +27,8 @@ func init() {
} }
type ChainIndex struct { type ChainIndex struct {
skipCache *lru.ARCCache indexCacheLk sync.Mutex
indexCache map[types.TipSetKey]*lbEntry
loadTipSet loadTipSetFunc loadTipSet loadTipSetFunc
@ -36,17 +37,14 @@ type ChainIndex struct {
type loadTipSetFunc func(context.Context, types.TipSetKey) (*types.TipSet, error) type loadTipSetFunc func(context.Context, types.TipSetKey) (*types.TipSet, error)
func NewChainIndex(lts loadTipSetFunc) *ChainIndex { func NewChainIndex(lts loadTipSetFunc) *ChainIndex {
sc, _ := lru.NewARC(DefaultChainIndexCacheSize)
return &ChainIndex{ return &ChainIndex{
skipCache: sc, indexCache: make(map[types.TipSetKey]*lbEntry, DefaultChainIndexCacheSize),
loadTipSet: lts, loadTipSet: lts,
skipLength: 20, skipLength: 20,
} }
} }
type lbEntry struct { type lbEntry struct {
ts *types.TipSet
parentHeight abi.ChainEpoch
targetHeight abi.ChainEpoch targetHeight abi.ChainEpoch
target types.TipSetKey target types.TipSetKey
} }
@ -58,25 +56,36 @@ func (ci *ChainIndex) GetTipsetByHeight(ctx context.Context, from *types.TipSet,
rounded, err := ci.roundDown(ctx, from) rounded, err := ci.roundDown(ctx, from)
if err != nil { if err != nil {
return nil, err return nil, xerrors.Errorf("failed to round down: %w", err)
} }
ci.indexCacheLk.Lock()
defer ci.indexCacheLk.Unlock()
cur := rounded.Key() cur := rounded.Key()
for { for {
cval, ok := ci.skipCache.Get(cur) lbe, ok := ci.indexCache[cur]
if !ok { if !ok {
fc, err := ci.fillCache(ctx, cur) fc, err := ci.fillCache(ctx, cur)
if err != nil { if err != nil {
return nil, err return nil, xerrors.Errorf("failed to fill cache: %w", err)
} }
cval = fc lbe = fc
} }
if to == lbe.targetHeight {
ts, err := ci.loadTipSet(ctx, lbe.target)
if err != nil {
return nil, xerrors.Errorf("failed to load tipset: %w", err)
}
return ts, nil
}
if to > lbe.targetHeight {
ts, err := ci.loadTipSet(ctx, cur)
if err != nil {
return nil, xerrors.Errorf("failed to load tipset: %w", err)
}
return ci.walkBack(ctx, ts, to)
} }
cur = lbe.target cur = lbe.target
@ -87,16 +96,17 @@ func (ci *ChainIndex) GetTipsetByHeightWithoutCache(ctx context.Context, from *t
return ci.walkBack(ctx, from, to) return ci.walkBack(ctx, from, to)
} }
// Caller must hold indexCacheLk
func (ci *ChainIndex) fillCache(ctx context.Context, tsk types.TipSetKey) (*lbEntry, error) { func (ci *ChainIndex) fillCache(ctx context.Context, tsk types.TipSetKey) (*lbEntry, error) {
ts, err := ci.loadTipSet(ctx, tsk) ts, err := ci.loadTipSet(ctx, tsk)
if err != nil { if err != nil {
return nil, err return nil, xerrors.Errorf("failed to load tipset: %w", err)
} }
if ts.Height() == 0 { if ts.Height() == 0 {
return &lbEntry{ return &lbEntry{
ts: ts, targetHeight: 0,
parentHeight: 0, target: tsk,
}, nil }, nil
} }
@ -124,12 +134,10 @@ func (ci *ChainIndex) fillCache(ctx context.Context, tsk types.TipSetKey) (*lbEn
} }
lbe := &lbEntry{ lbe := &lbEntry{
ts: ts,
parentHeight: parent.Height(),
targetHeight: skipTarget.Height(), targetHeight: skipTarget.Height(),
target: skipTarget.Key(), target: skipTarget.Key(),
} }
ci.skipCache.Add(tsk, lbe) ci.indexCache[tsk] = lbe
return lbe, nil return lbe, nil
} }
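The cache entries form a skip list over the chain: each entry hops skipLength (20) tipsets back, so a lookback walks strides until it would overshoot and only then loads parents one by one. A toy model with integer heights standing in for tipsets (everything here is hypothetical):

```go
package main

import "fmt"

const skipLength = 20

type lbEntry struct{ targetHeight int }

var indexCache = map[int]lbEntry{}

func fillCache(h int) lbEntry {
	t := h - skipLength
	if t < 0 {
		t = 0
	}
	e := lbEntry{targetHeight: t}
	indexCache[h] = e
	return e
}

func getByHeight(from, to int) int {
	cur := from - from%skipLength // roundDown to a stride boundary
	for {
		e, ok := indexCache[cur]
		if !ok {
			e = fillCache(cur)
		}
		if to == e.targetHeight {
			return e.targetHeight // exact hit on a cached hop
		}
		if to > e.targetHeight {
			return to // would overshoot: walk back parent-by-parent from cur
		}
		cur = e.targetHeight // keep hopping in 20-height strides
	}
}

func main() {
	// ~23 cached hops instead of ~460 parent loads
	fmt.Println(getByHeight(987, 523)) // 523
}
```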
@ -144,7 +152,7 @@ func (ci *ChainIndex) roundDown(ctx context.Context, ts *types.TipSet) (*types.T
rounded, err := ci.walkBack(ctx, ts, target) rounded, err := ci.walkBack(ctx, ts, target)
if err != nil { if err != nil {
return nil, err return nil, xerrors.Errorf("failed to walk back: %w", err)
} }
return rounded, nil return rounded, nil
@ -164,7 +172,7 @@ func (ci *ChainIndex) walkBack(ctx context.Context, from *types.TipSet, to abi.C
for { for {
pts, err := ci.loadTipSet(ctx, ts.Parents()) pts, err := ci.loadTipSet(ctx, ts.Parents())
if err != nil { if err != nil {
return nil, err return nil, xerrors.Errorf("failed to load tipset: %w", err)
} }
if to > pts.Height() { if to > pts.Height() {


@ -237,6 +237,26 @@ func (cs *ChainStore) ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.C
return blscids, secpkcids, nil return blscids, secpkcids, nil
} }
func (cs *ChainStore) ReadReceipts(ctx context.Context, root cid.Cid) ([]types.MessageReceipt, error) {
a, err := blockadt.AsArray(cs.ActorStore(ctx), root)
if err != nil {
return nil, err
}
receipts := make([]types.MessageReceipt, 0, a.Length())
var rcpt types.MessageReceipt
if err := a.ForEach(&rcpt, func(i int64) error {
if int64(len(receipts)) != i {
return xerrors.Errorf("missing receipt %d", i)
}
receipts = append(receipts, rcpt)
return nil
}); err != nil {
return nil, err
}
return receipts, nil
}
func (cs *ChainStore) MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { func (cs *ChainStore) MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) {
blscids, secpkcids, err := cs.ReadMsgMetaCids(ctx, b.Messages) blscids, secpkcids, err := cs.ReadMsgMetaCids(ctx, b.Messages)
if err != nil { if err != nil {


@ -196,7 +196,8 @@ func TestChainExportImportFull(t *testing.T) {
} }
nbs := blockstore.NewMemorySync() nbs := blockstore.NewMemorySync()
cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil) ds := datastore.NewMapDatastore()
cs := store.NewChainStore(nbs, nbs, ds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck defer cs.Close() //nolint:errcheck
root, err := cs.Import(context.TODO(), buf) root, err := cs.Import(context.TODO(), buf)
@ -213,7 +214,7 @@ func TestChainExportImportFull(t *testing.T) {
t.Fatal("imported chain differed from exported chain") t.Fatal("imported chain differed from exported chain")
} }
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule()) sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }


@ -44,14 +44,6 @@ type EthTx struct {
S EthBigInt `json:"s"` S EthBigInt `json:"s"`
} }
func (tx *EthTx) Reward(blkBaseFee big.Int) EthBigInt {
availablePriorityFee := big.Sub(big.Int(tx.MaxFeePerGas), blkBaseFee)
if big.Cmp(big.Int(tx.MaxPriorityFeePerGas), availablePriorityFee) <= 0 {
return tx.MaxPriorityFeePerGas
}
return EthBigInt(availablePriorityFee)
}
type EthTxArgs struct { type EthTxArgs struct {
ChainID int `json:"chainId"` ChainID int `json:"chainId"`
Nonce int `json:"nonce"` Nonce int `json:"nonce"`


@ -295,17 +295,21 @@ func EthAddressFromPubKey(pubk []byte) ([]byte, error) {
return ethAddr, nil return ethAddr, nil
} }
var maskedIDPrefix = [20 - 8]byte{0xff}
func IsEthAddress(addr address.Address) bool { func IsEthAddress(addr address.Address) bool {
if addr.Protocol() != address.Delegated { if addr.Protocol() != address.Delegated {
return false return false
} }
payload := addr.Payload() payload := addr.Payload()
namespace, _, err := varint.FromUvarint(payload) namespace, offset, err := varint.FromUvarint(payload)
if err != nil { if err != nil {
return false return false
} }
return namespace == builtintypes.EthereumAddressManagerActorID payload = payload[offset:]
return namespace == builtintypes.EthereumAddressManagerActorID && len(payload) == 20 && !bytes.HasPrefix(payload, maskedIDPrefix[:])
} }
func EthAddressFromFilecoinAddress(addr address.Address) (EthAddress, error) { func EthAddressFromFilecoinAddress(addr address.Address) (EthAddress, error) {
@ -326,9 +330,17 @@ func EthAddressFromFilecoinAddress(addr address.Address) (EthAddress, error) {
return EthAddress{}, xerrors.Errorf("invalid delegated address namespace in: %s", addr) return EthAddress{}, xerrors.Errorf("invalid delegated address namespace in: %s", addr)
} }
payload = payload[n:] payload = payload[n:]
if namespace == builtintypes.EthereumAddressManagerActorID { if namespace != builtintypes.EthereumAddressManagerActorID {
return CastEthAddress(payload) return EthAddress{}, ErrInvalidAddress
} }
ethAddr, err := CastEthAddress(payload)
if err != nil {
return EthAddress{}, err
}
if ethAddr.IsMaskedID() {
return EthAddress{}, xerrors.Errorf("f410f addresses cannot embed masked-ID payloads: %s", ethAddr)
}
return ethAddr, nil
} }
return EthAddress{}, ErrInvalidAddress return EthAddress{}, ErrInvalidAddress
} }
@ -376,8 +388,7 @@ func (ea *EthAddress) UnmarshalJSON(b []byte) error {
} }
func (ea EthAddress) IsMaskedID() bool { func (ea EthAddress) IsMaskedID() bool {
idmask := [12]byte{0xff} return bytes.HasPrefix(ea[:], maskedIDPrefix[:])
return bytes.Equal(ea[:12], idmask[:])
} }
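For orientation, the masked-ID layout the prefix check keys on (as EthAddressFromFilecoinAddress lays it out for ID addresses, to my reading) is one 0xff byte, eleven zero bytes, then the actor ID as a big-endian uint64. A hypothetical sketch:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

var maskedIDPrefix = [12]byte{0xff}

// maskedIDAddress builds the 20-byte masked-ID form for an actor ID;
// this illustrates the layout, it is not the lotus constructor.
func maskedIDAddress(id uint64) [20]byte {
	var ea [20]byte
	ea[0] = 0xff
	binary.BigEndian.PutUint64(ea[12:], id)
	return ea
}

func main() {
	ea := maskedIDAddress(100)
	fmt.Printf("%x\n", ea)                                 // ff + 11 zero bytes + 0000000000000064
	fmt.Println(bytes.HasPrefix(ea[:], maskedIDPrefix[:])) // true, i.e. IsMaskedID
}
```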
func (ea EthAddress) ToFilecoinAddress() (address.Address, error) { func (ea EthAddress) ToFilecoinAddress() (address.Address, error) {


@ -9,6 +9,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/builtin"
) )
type TestCase struct { type TestCase struct {
@ -178,6 +179,20 @@ func TestParseEthAddr(t *testing.T) {
} }
} }
func TestMaskedIDInF4(t *testing.T) {
addr, err := address.NewIDAddress(100)
require.NoError(t, err)
eaddr, err := EthAddressFromFilecoinAddress(addr)
require.NoError(t, err)
badaddr, err := address.NewDelegatedAddress(builtin.EthereumAddressManagerActorID, eaddr[:])
require.NoError(t, err)
_, err = EthAddressFromFilecoinAddress(badaddr)
require.Error(t, err)
}
func TestUnmarshalEthCall(t *testing.T) { func TestUnmarshalEthCall(t *testing.T) {
data := `{"from":"0x4D6D86b31a112a05A473c4aE84afaF873f632325","to":"0xFe01CC39f5Ae8553D6914DBb9dC27D219fa22D7f","gas":"0x5","gasPrice":"0x6","value":"0x123","data":""}` data := `{"from":"0x4D6D86b31a112a05A473c4aE84afaF873f632325","to":"0xFe01CC39f5Ae8553D6914DBb9dC27D219fa22D7f","gas":"0x5","gasPrice":"0x6","value":"0x123","data":""}`


@ -219,4 +219,17 @@ func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version)
return nil return nil
} }
// EffectiveGasPremium returns the effective gas premium claimable by the miner
// given the supplied base fee.
//
// Filecoin clamps the gas premium at GasFeeCap - BaseFee, if lower than the
// specified premium.
func (m *Message) EffectiveGasPremium(baseFee abi.TokenAmount) abi.TokenAmount {
available := big.Sub(m.GasFeeCap, baseFee)
if big.Cmp(m.GasPremium, available) <= 0 {
return m.GasPremium
}
return available
}
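A quick numeric check of the clamp, with plain integers standing in for abi.TokenAmount:

```go
package main

import "fmt"

// effectivePremium mirrors EffectiveGasPremium: the miner can claim the full
// premium only if GasFeeCap leaves that much room above the base fee.
func effectivePremium(feeCap, premium, baseFee int64) int64 {
	available := feeCap - baseFee
	if premium <= available {
		return premium
	}
	return available
}

func main() {
	fmt.Println(effectivePremium(200, 30, 150)) // 30: premium fits under the cap
	fmt.Println(effectivePremium(200, 80, 150)) // 50: clamped to GasFeeCap - BaseFee
}
```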
const TestGasLimit = 100e6 const TestGasLimit = 100e6


@ -10,7 +10,7 @@ type MpoolConfig struct {
PriorityAddrs []address.Address PriorityAddrs []address.Address
SizeLimitHigh int SizeLimitHigh int
SizeLimitLow int SizeLimitLow int
ReplaceByFeeRatio float64 ReplaceByFeeRatio Percent
PruneCooldown time.Duration PruneCooldown time.Duration
GasLimitOverestimation float64 GasLimitOverestimation float64
} }

chain/types/percent.go (new file)

@ -0,0 +1,39 @@
package types
import (
"fmt"
"math"
"strconv"
"golang.org/x/xerrors"
)
// Percent stores a signed percentage as an int64. When converted to a string (or json), it's
// rendered as a decimal with up to two places (e.g., 100% -> 1.0).
type Percent int64
func (p Percent) String() string {
abs := p
sign := ""
if abs < 0 {
abs = -abs
sign = "-"
}
return fmt.Sprintf(`%s%d.%d`, sign, abs/100, abs%100)
}
func (p Percent) MarshalJSON() ([]byte, error) {
return []byte(p.String()), nil
}
func (p *Percent) UnmarshalJSON(b []byte) error {
flt, err := strconv.ParseFloat(string(b)+"e2", 64)
if err != nil {
return xerrors.Errorf("unable to parse ratio %s: %w", string(b), err)
}
if math.Trunc(flt) != flt {
return xerrors.Errorf("ratio may only have two decimals: %s", string(b))
}
*p = Percent(flt)
return nil
}
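The appended "e2" exponent is what turns the decimal string into a whole-number percentage: ParseFloat scales the value by 100 before the truncation check rejects anything with more than two decimal places. For example:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	flt, _ := strconv.ParseFloat("1.25"+"e2", 64)
	fmt.Println(flt, math.Trunc(flt) == flt) // 125 true -> Percent(125)

	flt, _ = strconv.ParseFloat("1.255"+"e2", 64)
	fmt.Println(flt, math.Trunc(flt) == flt) // 125.5 false -> rejected
}
```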


@ -0,0 +1,34 @@
package types
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
func TestPercent(t *testing.T) {
for _, tc := range []struct {
p Percent
s string
}{
{100, "1.0"},
{111, "1.11"},
{12, "0.12"},
{-12, "-0.12"},
{1012, "10.12"},
{-1012, "-10.12"},
{0, "0.0"},
} {
tc := tc
t.Run(fmt.Sprintf("%d <> %s", tc.p, tc.s), func(t *testing.T) {
m, err := tc.p.MarshalJSON()
require.NoError(t, err)
require.Equal(t, tc.s, string(m))
var p Percent
require.NoError(t, p.UnmarshalJSON([]byte(tc.s)))
require.Equal(t, tc.p, p)
})
}
}


@ -234,6 +234,10 @@ func (ts *TipSet) MinTicketBlock() *BlockHeader {
return min return min
} }
func (ts *TipSet) ParentMessageReceipts() cid.Cid {
return ts.blks[0].ParentMessageReceipts
}
func (ts *TipSet) ParentState() cid.Cid { func (ts *TipSet) ParentState() cid.Cid {
return ts.blks[0].ParentStateRoot return ts.blks[0].ParentStateRoot
} }


@ -35,6 +35,7 @@ var EvmCmd = &cli.Command{
EvmGetInfoCmd, EvmGetInfoCmd,
EvmCallSimulateCmd, EvmCallSimulateCmd,
EvmGetContractAddress, EvmGetContractAddress,
EvmGetBytecode,
}, },
} }
@ -486,3 +487,51 @@ func ethAddrFromFilecoinAddress(ctx context.Context, addr address.Address, fnapi
return ethAddr, faddr, nil return ethAddr, faddr, nil
} }
var EvmGetBytecode = &cli.Command{
Name: "bytecode",
Usage: "Write the bytecode of a smart contract to a file",
ArgsUsage: "[contract-address] [file-name]",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "bin",
Usage: "write the bytecode as raw binary and don't hex-encode",
},
},
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 2 {
return IncorrectNumArgs(cctx)
}
contractAddr, err := ethtypes.ParseEthAddress(cctx.Args().Get(0))
if err != nil {
return err
}
fileName := cctx.Args().Get(1)
api, closer, err := GetFullNodeAPIV1(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
code, err := api.EthGetCode(ctx, contractAddr, "latest")
if err != nil {
return err
}
if !cctx.Bool("bin") {
newCode := make([]byte, hex.EncodedLen(len(code)))
hex.Encode(newCode, code)
code = newCode
}
if err := os.WriteFile(fileName, code, 0o666); err != nil {
return xerrors.Errorf("failed to write bytecode to file %s: %w", fileName, err)
}
fmt.Printf("Code for %s written to %s\n", contractAddr, fileName)
return nil
},
}


@ -461,7 +461,12 @@ var MpoolReplaceCmd = &cli.Command{
msg := found.Message msg := found.Message
if cctx.Bool("auto") { if cctx.Bool("auto") {
minRBF := messagepool.ComputeMinRBF(msg.GasPremium) cfg, err := api.MpoolGetConfig(ctx)
if err != nil {
return xerrors.Errorf("failed to lookup the message pool config: %w", err)
}
defaultRBF := messagepool.ComputeRBF(msg.GasPremium, cfg.ReplaceByFeeRatio)
var mss *lapi.MessageSendSpec var mss *lapi.MessageSendSpec
if cctx.IsSet("fee-limit") { if cctx.IsSet("fee-limit") {
@ -482,7 +487,7 @@ var MpoolReplaceCmd = &cli.Command{
return xerrors.Errorf("failed to estimate gas values: %w", err) return xerrors.Errorf("failed to estimate gas values: %w", err)
} }
msg.GasPremium = big.Max(retm.GasPremium, minRBF) msg.GasPremium = big.Max(retm.GasPremium, defaultRBF)
msg.GasFeeCap = big.Max(retm.GasFeeCap, msg.GasPremium) msg.GasFeeCap = big.Max(retm.GasFeeCap, msg.GasPremium)
mff := func() (abi.TokenAmount, error) { mff := func() (abi.TokenAmount, error) {


@ -15,6 +15,7 @@ import (
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock" "github.com/filecoin-project/lotus/chain/types/mock"
"github.com/filecoin-project/lotus/chain/wallet" "github.com/filecoin-project/lotus/chain/wallet"
@ -298,6 +299,7 @@ func TestReplace(t *testing.T) {
mockApi.EXPECT().ChainGetMessage(ctx, sm.Cid()).Return(&sm.Message, nil), mockApi.EXPECT().ChainGetMessage(ctx, sm.Cid()).Return(&sm.Message, nil),
mockApi.EXPECT().ChainHead(ctx).Return(nil, nil), mockApi.EXPECT().ChainHead(ctx).Return(nil, nil),
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
mockApi.EXPECT().MpoolGetConfig(ctx).Return(messagepool.DefaultConfig(), nil),
// use gomock.any to match the message in expected api calls // use gomock.any to match the message in expected api calls
// since the replace function modifies the message between calls, it would be pointless to try to match the exact argument // since the replace function modifies the message between calls, it would be pointless to try to match the exact argument
mockApi.EXPECT().GasEstimateMessageGas(ctx, gomock.Any(), &mss, types.EmptyTSK).Return(&sm.Message, nil), mockApi.EXPECT().GasEstimateMessageGas(ctx, gomock.Any(), &mss, types.EmptyTSK).Return(&sm.Message, nil),
@ -342,6 +344,7 @@ func TestReplace(t *testing.T) {
gomock.InOrder( gomock.InOrder(
mockApi.EXPECT().ChainHead(ctx).Return(nil, nil), mockApi.EXPECT().ChainHead(ctx).Return(nil, nil),
mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil), mockApi.EXPECT().MpoolPending(ctx, types.EmptyTSK).Return([]*types.SignedMessage{sm}, nil),
mockApi.EXPECT().MpoolGetConfig(ctx).Return(messagepool.DefaultConfig(), nil),
// use gomock.any to match the message in expected api calls // use gomock.any to match the message in expected api calls
// since the replace function modifies the message between calls, it would be pointless to try to match the exact argument // since the replace function modifies the message between calls, it would be pointless to try to match the exact argument
mockApi.EXPECT().GasEstimateMessageGas(ctx, gomock.Any(), &mss, types.EmptyTSK).Return(&sm.Message, nil), mockApi.EXPECT().GasEstimateMessageGas(ctx, gomock.Any(), &mss, types.EmptyTSK).Return(&sm.Message, nil),
@ -538,7 +541,7 @@ func TestConfig(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
mpoolCfg := &types.MpoolConfig{PriorityAddrs: []address.Address{senderAddr}, SizeLimitHigh: 1234567, SizeLimitLow: 6, ReplaceByFeeRatio: 0.25} mpoolCfg := &types.MpoolConfig{PriorityAddrs: []address.Address{senderAddr}, SizeLimitHigh: 1234567, SizeLimitLow: 6, ReplaceByFeeRatio: types.Percent(25)}
gomock.InOrder( gomock.InOrder(
mockApi.EXPECT().MpoolGetConfig(ctx).Return(mpoolCfg, nil), mockApi.EXPECT().MpoolGetConfig(ctx).Return(mpoolCfg, nil),
) )
@ -566,7 +569,7 @@ func TestConfig(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
mpoolCfg := &types.MpoolConfig{PriorityAddrs: []address.Address{senderAddr}, SizeLimitHigh: 234567, SizeLimitLow: 3, ReplaceByFeeRatio: 0.33} mpoolCfg := &types.MpoolConfig{PriorityAddrs: []address.Address{senderAddr}, SizeLimitHigh: 234567, SizeLimitLow: 3, ReplaceByFeeRatio: types.Percent(33)}
gomock.InOrder( gomock.InOrder(
mockApi.EXPECT().MpoolSetConfig(ctx, mpoolCfg).Return(nil), mockApi.EXPECT().MpoolSetConfig(ctx, mpoolCfg).Return(nil),
) )


@ -229,7 +229,7 @@ var importBenchCmd = &cli.Command{
defer cs.Close() //nolint:errcheck defer cs.Close() //nolint:errcheck
// TODO: We need to supply the actual beacon after v14 // TODO: We need to supply the actual beacon after v14
stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil) stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil, metadataDs)
if err != nil { if err != nil {
return err return err
} }


@ -19,6 +19,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/proof"
"github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
@ -640,7 +641,42 @@ It will not send any messages to the chain.`,
if err != nil { if err != nil {
return err return err
} }
jr, err := json.Marshal(res)
// convert sector information into an easily readable form
type PoStPartition struct {
Index uint64
Skipped []uint64
}
type SubmitWindowedPoStParams struct {
Deadline uint64
Partitions []PoStPartition
Proofs []proof.PoStProof
ChainCommitEpoch abi.ChainEpoch
ChainCommitRand abi.Randomness
}
var postParams []SubmitWindowedPoStParams
for _, i := range res {
var postParam SubmitWindowedPoStParams
postParam.Deadline = i.Deadline
postParam.Partitions = make([]PoStPartition, len(i.Partitions))
for id, part := range i.Partitions {
postParam.Partitions[id].Index = part.Index
count, err := part.Skipped.Count()
if err != nil {
return err
}
sectors, err := part.Skipped.All(count)
if err != nil {
return err
}
postParam.Partitions[id].Skipped = sectors
}
postParam.Proofs = i.Proofs
postParam.ChainCommitEpoch = i.ChainCommitEpoch
postParam.ChainCommitRand = i.ChainCommitRand
postParams = append(postParams, postParam)
}
jr, err := json.MarshalIndent(postParams, "", " ")
if err != nil { if err != nil {
return err return err
} }


@ -513,7 +513,7 @@ var chainBalanceStateCmd = &cli.Command{
cst := cbor.NewCborStore(bs) cst := cbor.NewCborStore(bs)
store := adt.WrapStore(ctx, cst) store := adt.WrapStore(ctx, cst)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil) sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
if err != nil { if err != nil {
return err return err
} }
@ -737,7 +737,7 @@ var chainPledgeCmd = &cli.Command{
cst := cbor.NewCborStore(bs) cst := cbor.NewCborStore(bs)
store := adt.WrapStore(ctx, cst) store := adt.WrapStore(ctx, cst)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil) sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
if err != nil { if err != nil {
return err return err
} }


@ -111,7 +111,7 @@ var gasTraceCmd = &cli.Command{
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck defer cs.Close() //nolint:errcheck
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd) sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds)
if err != nil { if err != nil {
return err return err
} }
@ -212,7 +212,7 @@ var replayOfflineCmd = &cli.Command{
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck defer cs.Close() //nolint:errcheck
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd) sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds)
if err != nil { if err != nil {
return err return err
} }


@ -90,7 +90,7 @@ var invariantsCmd = &cli.Command{
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck defer cs.Close() //nolint:errcheck
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil) sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
if err != nil { if err != nil {
return err return err
} }


@ -8,6 +8,7 @@ import (
"time" "time"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
cbg "github.com/whyrusleeping/cbor-gen" cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors" "golang.org/x/xerrors"
@ -121,7 +122,8 @@ var migrationsCmd = &cli.Command{
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck defer cs.Close() //nolint:errcheck
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil) // Note: we use a map datastore for the metadata to avoid writing / using cached migration results in the metadata store
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, datastore.NewMapDatastore())
if err != nil { if err != nil {
return err return err
} }


@ -308,7 +308,7 @@ to reduce the number of decode operations performed by caching the decoded objec
} }
tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc) tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc)
sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil) sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
if err != nil { if err != nil {
return err return err
} }


@ -106,7 +106,7 @@ func (nd *Node) LoadSim(ctx context.Context, name string) (*Simulation, error) {
if err != nil { if err != nil {
return nil, xerrors.Errorf("failed to create upgrade schedule for simulation %s: %w", name, err) return nil, xerrors.Errorf("failed to create upgrade schedule for simulation %s: %w", name, err)
} }
sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), us, nil) sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), us, nil, nd.MetadataDS)
if err != nil { if err != nil {
return nil, xerrors.Errorf("failed to create state manager for simulation %s: %w", name, err) return nil, xerrors.Errorf("failed to create state manager for simulation %s: %w", name, err)
} }
@ -125,7 +125,7 @@ func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet)
if err != nil { if err != nil {
return nil, err return nil, err
} }
sm, err := stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil) sm, err := stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil, nd.MetadataDS)
if err != nil { if err != nil {
return nil, xerrors.Errorf("creating state manager: %w", err) return nil, xerrors.Errorf("creating state manager: %w", err)
} }


@ -201,7 +201,7 @@ func (sim *Simulation) SetUpgradeHeight(nv network.Version, epoch abi.ChainEpoch
if err != nil { if err != nil {
return err return err
} }
sm, err := stmgr.NewStateManager(sim.Node.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil) sm, err := stmgr.NewStateManager(sim.Node.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil, sim.Node.MetadataDS)
if err != nil { if err != nil {
return err return err
} }


@ -540,7 +540,7 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool)
} }
// TODO: We need to supply the actual beacon after v14 // TODO: We need to supply the actual beacon after v14
stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil) stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
if err != nil { if err != nil {
return err return err
} }


@@ -108,7 +108,7 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params
 		cs      = store.NewChainStore(bs, bs, ds, filcns.Weight, nil)
 		tse     = consensus.NewTipSetExecutor(filcns.RewardFunc)
-		sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil)
+		sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil, ds)
 	)
 	if err != nil {
 		return nil, err


@@ -2879,7 +2879,7 @@ Response:
   ],
   "SizeLimitHigh": 123,
   "SizeLimitLow": 123,
-  "ReplaceByFeeRatio": 12.3,
+  "ReplaceByFeeRatio": 1.23,
   "PruneCooldown": 60000000000,
   "GasLimitOverestimation": 12.3
 }
@@ -3167,7 +3167,7 @@ Inputs:
   ],
   "SizeLimitHigh": 123,
   "SizeLimitLow": 123,
-  "ReplaceByFeeRatio": 12.3,
+  "ReplaceByFeeRatio": 1.23,
   "PruneCooldown": 60000000000,
   "GasLimitOverestimation": 12.3
 }
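The documented example for ReplaceByFeeRatio drops from 12.3 to a more plausible 1.23: the ratio is the minimum multiplicative premium bump a replacement message must carry. A hedged sketch of such a check (the helper name and fixed-point scaling are illustrative, not the mempool's actual code):

```
// Assuming a 1.23 ratio: a replacement must raise the gas premium by >= 23%.
func premiumSufficient(oldPremium, newPremium types.BigInt, ratio float64) bool {
	min := types.BigDiv(
		types.BigMul(oldPremium, types.NewInt(uint64(ratio*1000))),
		types.NewInt(1000),
	)
	return newPremium.GreaterThanEqual(min)
}
```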


@@ -3882,7 +3882,7 @@ Response:
   ],
   "SizeLimitHigh": 123,
   "SizeLimitLow": 123,
-  "ReplaceByFeeRatio": 12.3,
+  "ReplaceByFeeRatio": 1.23,
   "PruneCooldown": 60000000000,
   "GasLimitOverestimation": 12.3
 }
@@ -4170,7 +4170,7 @@ Inputs:
   ],
   "SizeLimitHigh": 123,
   "SizeLimitLow": 123,
-  "ReplaceByFeeRatio": 12.3,
+  "ReplaceByFeeRatio": 1.23,
   "PruneCooldown": 60000000000,
   "GasLimitOverestimation": 12.3
 }


@@ -2647,6 +2647,7 @@ COMMANDS:
    stat              Print eth/filecoin addrs and code cid
    call              Simulate an eth contract call
    contract-address  Generate contract address from smart contract code
+   bytecode          Write the bytecode of a smart contract to a file
    help, h           Shows a list of commands or help for one command
 
 OPTIONS:
@@ -2721,6 +2722,19 @@ OPTIONS:
 ```
 
+### lotus evm bytecode
+```
+NAME:
+   lotus evm bytecode - Write the bytecode of a smart contract to a file
+
+USAGE:
+   lotus evm bytecode [command options] [contract-address] [file-name]
+
+OPTIONS:
+   --bin  write the bytecode as raw binary and don't hex-encode (default: false)
+```
+
 ## lotus net
 ```
 NAME:
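A hypothetical invocation of the new subcommand (the contract address and output file name below are invented for illustration), writing a contract's EVM bytecode as raw binary:

```
lotus evm bytecode --bin 0xd4c5fb16488Aa48081296299d54b0c648C9333dA contract.bin
```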


@@ -191,7 +191,7 @@
 [Chainstore]
   # type: bool
   # env var: LOTUS_CHAINSTORE_ENABLESPLITSTORE
-  #EnableSplitstore = false
+  #EnableSplitstore = true
 
   [Chainstore.Splitstore]
     # ColdStoreType specifies the type of the coldstore.
@@ -199,7 +199,7 @@
     #
     # type: string
     # env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTORETYPE
-    #ColdStoreType = "messages"
+    #ColdStoreType = "discard"
 
     # HotStoreType specifies the type of the hotstore.
     # Only currently supported value is "badger".
@@ -230,6 +230,35 @@
     # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREFULLGCFREQUENCY
     #HotStoreFullGCFrequency = 20
 
+    # HotStoreMaxSpaceTarget sets a target max disk size for the hotstore. Splitstore GC
+    # will run moving GC if disk utilization gets within a threshold (150 GB) of the target.
+    # Splitstore GC will NOT run moving GC if the total size of the move would get
+    # within 50 GB of the target, and instead will run a more aggressive online GC.
+    # If both HotStoreFullGCFrequency and HotStoreMaxSpaceTarget are set then splitstore
+    # GC will trigger moving GC if either configuration condition is met.
+    # A reasonable minimum is 2x fully GCed hotstore size + 50 G buffer.
+    # At this minimum size moving GC happens every time, any smaller and moving GC won't
+    # be able to run. In spring 2023 this minimum is ~550 GB.
+    #
+    # type: uint64
+    # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMAXSPACETARGET
+    #HotStoreMaxSpaceTarget = 0
+
+    # When HotStoreMaxSpaceTarget is set Moving GC will be triggered when total moving size
+    # exceeds HotstoreMaxSpaceTarget - HotstoreMaxSpaceThreshold
+    #
+    # type: uint64
+    # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMAXSPACETHRESHOLD
+    #HotStoreMaxSpaceThreshold = 150000000000
+
+    # Safety buffer to prevent moving GC from overflowing disk when HotStoreMaxSpaceTarget
+    # is set. Moving GC will not occur when total moving size exceeds
+    # HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer
+    #
+    # type: uint64
+    # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMAXSPACESAFETYBUFFER
+    #HotstoreMaxSpaceSafetyBuffer = 50000000000
+
 [Cluster]
   # EXPERIMENTAL. config to enabled node cluster with raft consensus
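A sketch of how the three new knobs interact, using an assumed 1 TB target with the default threshold and buffer (this restates the comments above; it is not splitstore's actual code):

```
const (
	target       = 1_000_000_000_000 // HotStoreMaxSpaceTarget (assumed value)
	threshold    = 150_000_000_000   // HotStoreMaxSpaceThreshold (default)
	safetyBuffer = 50_000_000_000    // HotstoreMaxSpaceSafetyBuffer (default)
)

// Moving GC kicks in once the projected move size crosses target-threshold
// (850 GB here), but is skipped in favour of aggressive online GC once it
// would come within the safety buffer of the target (950 GB here).
func shouldRunMovingGC(totalMovingSize int64) bool {
	return totalMovingSize > target-threshold && totalMovingSize <= target-safetyBuffer
}
```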


@@ -27,6 +27,7 @@ import (
 	_ "github.com/filecoin-project/lotus/lib/sigs/secp"
 	"github.com/filecoin-project/lotus/metrics"
 	"github.com/filecoin-project/lotus/node/impl/full"
+	"github.com/filecoin-project/lotus/node/modules/dtypes"
 )
 
 const (
@@ -59,18 +60,22 @@ type TargetAPI interface {
 	ChainPutObj(context.Context, blocks.Block) error
 	ChainGetGenesis(context.Context) (*types.TipSet, error)
 	GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
+	MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error)
 	MpoolPushUntrusted(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
 	MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
 	MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
 	MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (api.MsigVesting, error)
 	MsigGetPending(ctx context.Context, addr address.Address, ts types.TipSetKey) ([]*api.MsigTransaction, error)
 	StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
+	StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error)
 	StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
+	StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
 	StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
 	StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
 	StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
 	StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
 	StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error)
+	StateNetworkName(context.Context) (dtypes.NetworkName, error)
 	StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
 	StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error)
 	StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error)


@@ -20,6 +20,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/lib/sigs"
+	"github.com/filecoin-project/lotus/node/modules/dtypes"
 )
 
 func (gw *Node) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) {
@@ -187,6 +188,13 @@ func (gw *Node) GasEstimateMessageGas(ctx context.Context, msg *types.Message, s
 	return gw.target.GasEstimateMessageGas(ctx, msg, spec, tsk)
 }
 
+func (gw *Node) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) {
+	if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+		return 0, err
+	}
+	return gw.target.MpoolGetNonce(ctx, addr)
+}
+
 func (gw *Node) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) {
 	if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
 		return cid.Cid{}, err
@@ -248,6 +256,16 @@ func (gw *Node) StateAccountKey(ctx context.Context, addr address.Address, tsk t
 	return gw.target.StateAccountKey(ctx, addr, tsk)
 }
 
+func (gw *Node) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error) {
+	if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+		return nil, err
+	}
+	if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+		return nil, err
+	}
+	return gw.target.StateCall(ctx, msg, tsk)
+}
+
 func (gw *Node) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) {
 	if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
 		return api.DealCollateralBounds{}, err
@@ -258,6 +276,16 @@ func (gw *Node) StateDealProviderCollateralBounds(ctx context.Context, size abi.
 	return gw.target.StateDealProviderCollateralBounds(ctx, size, verified, tsk)
 }
 
+func (gw *Node) StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) {
+	if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+		return nil, err
+	}
+	if err := gw.checkTipsetKey(ctx, tsk); err != nil {
+		return nil, err
+	}
+	return gw.target.StateDecodeParams(ctx, toAddr, method, params, tsk)
+}
+
 func (gw *Node) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) {
 	if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
 		return nil, err
@@ -308,6 +336,13 @@ func (gw *Node) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, t
 	return gw.target.StateMarketStorageDeal(ctx, dealId, tsk)
 }
 
+func (gw *Node) StateNetworkName(ctx context.Context) (dtypes.NetworkName, error) {
+	if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
+		return *new(dtypes.NetworkName), err
+	}
+	return gw.target.StateNetworkName(ctx)
+}
+
 func (gw *Node) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) {
 	if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
 		return network.VersionMax, err
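Every gateway method added here follows the same shape: debit rate-limit tokens, validate any caller-supplied tipset key, then delegate to the target node. A generic sketch of that pattern (the method name is hypothetical):

```
func (gw *Node) exampleStateMethod(ctx context.Context, tsk types.TipSetKey) error {
	if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
		return err // caller exceeded its rate budget
	}
	if err := gw.checkTipsetKey(ctx, tsk); err != nil {
		return err // tipset is unknown or outside the lookback window
	}
	// delegate to the underlying full node, e.g. gw.target.StateFoo(ctx, tsk)
	return nil
}
```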


@@ -3,6 +3,7 @@ package itests
 import (
 	"context"
 	"fmt"
+	"strings"
 	"testing"
 	"time"
 
@@ -48,13 +49,14 @@ func TestEthBlockHashesCorrect_MultiBlockTipset(t *testing.T) {
 	// let the chain run a little bit longer to minimise the chance of reorgs
 	n2.WaitTillChain(ctx, kit.HeightAtLeast(head.Height()+50))
 
+	head, err = n2.ChainHead(context.Background())
+	require.NoError(t, err)
+
 	for i := 1; i <= int(head.Height()); i++ {
 		hex := fmt.Sprintf("0x%x", i)
 
 		ethBlockA, err := n2.EthGetBlockByNumber(ctx, hex, true)
+		// Cannot use the static ErrNullRound error for comparison since it gets reserialized as a JSON RPC error.
+		if err != nil && strings.Contains(err.Error(), "null round") {
+			continue
+		}
 		require.NoError(t, err)
 
 		ethBlockB, err := n2.EthGetBlockByHash(ctx, ethBlockA.Hash, true)


@@ -236,14 +236,6 @@ func TestEthOpenRPCConformance(t *testing.T) {
 			skipReason: "earliest block is not supported",
 		},
 
-		{
-			method:  "eth_getBlockByNumber",
-			variant: "pending",
-			call: func(a *ethAPIRaw) (json.RawMessage, error) {
-				return ethapi.EthGetBlockByNumber(context.Background(), "pending", true)
-			},
-		},
-
 		{
 			method: "eth_getBlockByNumber",
 			call: func(a *ethAPIRaw) (json.RawMessage, error) {


@@ -3,18 +3,42 @@ package itests
 import (
 	"context"
 	"encoding/json"
+	"sort"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/require"
 
 	"github.com/filecoin-project/go-jsonrpc"
+	"github.com/filecoin-project/go-state-types/abi"
 
+	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/types/ethtypes"
 	"github.com/filecoin-project/lotus/itests/kit"
 	"github.com/filecoin-project/lotus/lib/result"
+	"github.com/filecoin-project/lotus/node/impl/full"
 )
 
+// calculateExpectations calculates the expected number of items to be included in the response
+// of eth_feeHistory. It takes care of null rounds by finding the closest tipset with height
+// smaller than startHeight, and then looks back at requestAmount of items. It also considers
+// scenarios where there are not enough items to look back.
+func calculateExpectations(tsHeights []int, requestAmount, startHeight int) (count, oldestHeight int) {
+	latestIdx := sort.SearchInts(tsHeights, startHeight)
+	// SearchInts returns the index of the number that's larger than the target if the target
+	// doesn't exist. However, we're looking for the closest number that's smaller than the target.
+	for tsHeights[latestIdx] > startHeight {
+		latestIdx--
+	}
+	cnt := requestAmount
+	oldestIdx := latestIdx - requestAmount + 1
+	if oldestIdx < 0 {
+		cnt = latestIdx + 1
+		oldestIdx = 0
+	}
+	return cnt, tsHeights[oldestIdx]
+}
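For intuition, a quick worked example of the helper (heights chosen to include a null-round gap):

```
// With tipsets at heights 1-4 and 8-10 (5-7 were null rounds), a request
// for 5 items ending at height 7 snaps back to height 4, leaving only
// 4 tipsets to return, the oldest at height 1.
heights := []int{1, 2, 3, 4, 8, 9, 10}
count, oldest := calculateExpectations(heights, 5, 7)
fmt.Println(count, oldest) // prints: 4 1
```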
 func TestEthFeeHistory(t *testing.T) {
 	require := require.New(t)
@@ -22,70 +46,136 @@ func TestEthFeeHistory(t *testing.T) {
 	blockTime := 100 * time.Millisecond
 	client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
-	ens.InterconnectAll().BeginMining(blockTime)
 	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
 	defer cancel()
 
-	// Wait for the network to create 20 blocks
+	heads, err := client.ChainNotify(ctx)
+	require.NoError(err)
+
+	// Save the full view of the tipsets to calculate the answer when there are null rounds
+	tsHeights := []int{1}
+	go func() {
+		for chg := range heads {
+			for _, c := range chg {
+				tsHeights = append(tsHeights, int(c.Val.Height()))
+			}
+		}
+	}()
+
+	miner := ens.InterconnectAll().BeginMining(blockTime)
+	client.WaitTillChain(ctx, kit.HeightAtLeast(7))
+	miner[0].InjectNulls(abi.ChainEpoch(5))
+
+	// Wait for the network to create at least 20 tipsets
 	client.WaitTillChain(ctx, kit.HeightAtLeast(20))
+	for _, m := range miner {
+		m.Pause()
+	}
+
+	ch, err := client.ChainNotify(ctx)
+	require.NoError(err)
+
+	// Wait for 5 seconds of inactivity
+	func() {
+		for {
+			select {
+			case <-ch:
+				continue
+			case <-time.After(5 * time.Second):
+				return
+			}
+		}
+	}()
+
+	sort.Ints(tsHeights)
+
+	// because of the deferred execution, the last tipset is not executed yet,
+	// and the one before the last one is the last executed tipset,
+	// which corresponds to the "latest" tag in EthGetBlockByNumber
+	latestBlk := ethtypes.EthUint64(tsHeights[len(tsHeights)-2])
+	blk, err := client.EthGetBlockByNumber(ctx, "latest", false)
+	require.NoError(err)
+	require.Equal(blk.Number, latestBlk)
+
+	assertHistory := func(history *ethtypes.EthFeeHistory, requestAmount, startHeight int) {
+		amount, oldest := calculateExpectations(tsHeights, requestAmount, startHeight)
+		require.Equal(amount+1, len(history.BaseFeePerGas))
+		require.Equal(amount, len(history.GasUsedRatio))
+		require.Equal(ethtypes.EthUint64(oldest), history.OldestBlock)
+	}
 
 	history, err := client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
 		json.Marshal([]interface{}{5, "0x10"}),
 	).Assert(require.NoError))
 	require.NoError(err)
-	require.Equal(6, len(history.BaseFeePerGas))
-	require.Equal(5, len(history.GasUsedRatio))
-	require.Equal(ethtypes.EthUint64(16-5+1), history.OldestBlock)
+	assertHistory(&history, 5, 16)
 	require.Nil(history.Reward)
 
 	history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
 		json.Marshal([]interface{}{"5", "0x10"}),
 	).Assert(require.NoError))
 	require.NoError(err)
-	require.Equal(6, len(history.BaseFeePerGas))
-	require.Equal(5, len(history.GasUsedRatio))
-	require.Equal(ethtypes.EthUint64(16-5+1), history.OldestBlock)
+	assertHistory(&history, 5, 16)
+	require.Nil(history.Reward)
+
+	history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
+		json.Marshal([]interface{}{5, "latest"}),
+	).Assert(require.NoError))
+	require.NoError(err)
+	assertHistory(&history, 5, int(latestBlk))
 	require.Nil(history.Reward)
 
 	history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
 		json.Marshal([]interface{}{"0x10", "0x12"}),
 	).Assert(require.NoError))
 	require.NoError(err)
-	require.Equal(17, len(history.BaseFeePerGas))
-	require.Equal(16, len(history.GasUsedRatio))
-	require.Equal(ethtypes.EthUint64(18-16+1), history.OldestBlock)
+	assertHistory(&history, 16, 18)
 	require.Nil(history.Reward)
 
 	history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
 		json.Marshal([]interface{}{5, "0x10"}),
 	).Assert(require.NoError))
 	require.NoError(err)
-	require.Equal(6, len(history.BaseFeePerGas))
-	require.Equal(5, len(history.GasUsedRatio))
-	require.Equal(ethtypes.EthUint64(16-5+1), history.OldestBlock)
+	assertHistory(&history, 5, 16)
 	require.Nil(history.Reward)
 
 	history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
 		json.Marshal([]interface{}{5, "10"}),
 	).Assert(require.NoError))
 	require.NoError(err)
-	require.Equal(6, len(history.BaseFeePerGas))
-	require.Equal(5, len(history.GasUsedRatio))
-	require.Equal(ethtypes.EthUint64(10-5+1), history.OldestBlock)
+	assertHistory(&history, 5, 10)
+	require.Nil(history.Reward)
+
+	// test when the requested number of blocks is longer than chain length
+	history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
+		json.Marshal([]interface{}{"0x30", "latest"}),
+	).Assert(require.NoError))
+	require.NoError(err)
+	assertHistory(&history, 48, int(latestBlk))
+	require.Nil(history.Reward)
+
+	// test when the requested number of blocks is longer than chain length
+	history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
+		json.Marshal([]interface{}{"0x30", "10"}),
+	).Assert(require.NoError))
+	require.NoError(err)
+	assertHistory(&history, 48, 10)
 	require.Nil(history.Reward)
 
 	history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
 		json.Marshal([]interface{}{5, "10", &[]float64{25, 50, 75}}),
 	).Assert(require.NoError))
 	require.NoError(err)
-	require.Equal(6, len(history.BaseFeePerGas))
-	require.Equal(5, len(history.GasUsedRatio))
-	require.Equal(ethtypes.EthUint64(10-5+1), history.OldestBlock)
+	assertHistory(&history, 5, 10)
 	require.NotNil(history.Reward)
 	require.Equal(5, len(*history.Reward))
 	for _, arr := range *history.Reward {
 		require.Equal(3, len(arr))
+		for _, item := range arr {
+			require.Equal(ethtypes.EthBigInt(types.NewInt(full.MinGasPremium)), item)
+		}
 	}
 
 	history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
@@ -93,6 +183,11 @@ func TestEthFeeHistory(t *testing.T) {
 	).Assert(require.NoError))
 	require.Error(err)
 
+	history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
+		json.Marshal([]interface{}{5, "10", &[]float64{75, 50}}),
+	).Assert(require.NoError))
+	require.Error(err)
+
 	history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
 		json.Marshal([]interface{}{5, "10", &[]float64{}}),
 	).Assert(require.NoError))


@@ -14,6 +14,7 @@ import (
 	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/store"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/types/ethtypes"
 	"github.com/filecoin-project/lotus/itests/kit"
@@ -270,6 +271,57 @@ func TestContractInvocation(t *testing.T) {
 	require.EqualValues(t, ethtypes.EthUint64(0x1), receipt.Status)
 }
 
+func TestGetBlockByNumber(t *testing.T) {
+	blockTime := 100 * time.Millisecond
+	client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
+
+	bms := ens.InterconnectAll().BeginMining(blockTime)
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	defer cancel()
+
+	// create a new Ethereum account
+	_, ethAddr, filAddr := client.EVM().NewAccount()
+	// send some funds to the f410 address
+	kit.SendFunds(ctx, t, client, filAddr, types.FromFil(10))
+
+	latest, err := client.EthBlockNumber(ctx)
+	require.NoError(t, err)
+
+	// can get the latest block
+	_, err = client.EthGetBlockByNumber(ctx, latest.Hex(), true)
+	require.NoError(t, err)
+
+	// fail to get a future block
+	_, err = client.EthGetBlockByNumber(ctx, (latest + 10000).Hex(), true)
+	require.Error(t, err)
+
+	// inject 10 null rounds
+	bms[0].InjectNulls(10)
+
+	// wait until we produce blocks again
+	tctx, cancel := context.WithTimeout(ctx, 30*time.Second)
+	defer cancel()
+	ch, err := client.ChainNotify(tctx)
+	require.NoError(t, err)
+	<-ch       // current
+	hc := <-ch // wait for next block
+	require.Equal(t, store.HCApply, hc[0].Type)
+
+	afterNullHeight := hc[0].Val.Height()
+
+	// Fail when trying to fetch a null round.
+	_, err = client.EthGetBlockByNumber(ctx, (ethtypes.EthUint64(afterNullHeight - 1)).Hex(), true)
+	require.Error(t, err)
+
+	// Fetch balance on a null round; should not fail and should return previous balance.
+	// Should be lower than original balance.
+	bal, err := client.EthGetBalance(ctx, ethAddr, (ethtypes.EthUint64(afterNullHeight - 1)).Hex())
+	require.NoError(t, err)
+	require.NotEqual(t, big.Zero(), bal)
+	require.Equal(t, types.FromFil(10).Int, bal.Int)
+}
+
 func deployContractTx(ctx context.Context, client *kit.TestFullNode, ethAddr ethtypes.EthAddress, contract []byte) (*ethtypes.EthTxArgs, error) {
 	gaslimit, err := client.EthEstimateGas(ctx, ethtypes.EthCall{
 		From: &ethAddr,


@@ -221,7 +221,7 @@ type RpcReader struct {
 	res       chan readRes
 
 	beginOnce *sync.Once
-	closeOnce sync.Once
+	closeOnce *sync.Once
 }
 
 var ErrHasBody = errors.New("RPCReader has body, either already read from or from a client with no redirect support")
@@ -265,6 +265,7 @@ func (w *RpcReader) beginPost() {
 		w.postBody = nr.postBody
 		w.res = nr.res
 		w.beginOnce = nr.beginOnce
+		w.closeOnce = nr.closeOnce
 	}
 }
@@ -355,6 +356,7 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) {
 			res:       make(chan readRes),
 			next:      ch,
 			beginOnce: &sync.Once{},
+			closeOnce: &sync.Once{},
 		}
 
 		switch req.Method {
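The switch from a sync.Once value to a *sync.Once pointer matters because beginPost copies state from the next reader in the chain; with a value field each copy would own a separate Once and the close logic could fire twice. A standalone illustration of the sharing behaviour (not lotus code):

```
once := &sync.Once{}
a := struct{ closeOnce *sync.Once }{once}
b := struct{ closeOnce *sync.Once }{once}
a.closeOnce.Do(func() { fmt.Println("closed") }) // runs
b.closeOnce.Do(func() { fmt.Println("closed") }) // no-op: the Once is shared
```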


@@ -3,8 +3,8 @@ package rpcenc
 import (
 	"context"
+	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http/httptest"
 	"strings"
 	"sync"
@@ -77,7 +77,12 @@ func (h *ReaderHandler) CloseReader(ctx context.Context, r io.Reader) error {
 }
 
 func (h *ReaderHandler) ReadAll(ctx context.Context, r io.Reader) ([]byte, error) {
-	return ioutil.ReadAll(r)
+	b, err := io.ReadAll(r)
+	if err != nil {
+		return nil, xerrors.Errorf("readall: %w", err)
+	}
+	return b, nil
 }
 
 func (h *ReaderHandler) ReadNullLen(ctx context.Context, r io.Reader) (int64, error) {
@@ -219,9 +224,15 @@ func TestReaderRedirect(t *testing.T) {
 }
 
 func TestReaderRedirectDrop(t *testing.T) {
+	for i := 0; i < 10; i++ {
+		t.Run(fmt.Sprintf("test %d", i), testReaderRedirectDrop)
+	}
+}
+
+func testReaderRedirectDrop(t *testing.T) {
 	// lower timeout so that the dangling connection between client and reader is dropped quickly
 	// after the test. Otherwise httptest.Close is blocked.
-	Timeout = 200 * time.Millisecond
+	Timeout = 90 * time.Millisecond
 
 	var allClient struct {
 		ReadAll func(ctx context.Context, r io.Reader) ([]byte, error)
@@ -294,6 +305,8 @@ func TestReaderRedirectDrop(t *testing.T) {
 	done.Wait()
 
+	fmt.Println("---------------------")
+
 	// Redir client drops before subcall
 	done.Add(1)
@@ -322,5 +335,9 @@ func TestReaderRedirectDrop(t *testing.T) {
 	// wait for subcall to finish
 	<-contCh
 
-	require.ErrorContains(t, allServerHandler.subErr, "decoding params for 'ReaderHandler.ReadAll' (param: 0; custom decoder): context canceled")
+	estr := allServerHandler.subErr.Error()
+	require.True(t,
+		strings.Contains(estr, "decoding params for 'ReaderHandler.ReadAll' (param: 0; custom decoder): context canceled") ||
+			strings.Contains(estr, "readall: unexpected EOF"), "unexpected error: %s", estr)
 }


@@ -89,13 +89,15 @@ func DefaultFullNode() *FullNode {
 			SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers,
 		},
 		Chainstore: Chainstore{
-			EnableSplitstore: false,
+			EnableSplitstore: true,
 			Splitstore: Splitstore{
-				ColdStoreType:           "messages",
+				ColdStoreType:           "discard",
 				HotStoreType:            "badger",
 				MarkSetType:             "badger",
 				HotStoreFullGCFrequency: 20,
+				HotStoreMaxSpaceThreshold:    150_000_000_000,
+				HotstoreMaxSpaceSafetyBuffer: 50_000_000_000,
 			},
 		},
 		Cluster: *DefaultUserRaftConfig(),


@@ -1286,6 +1286,35 @@ the compaction boundary; default is 0.`,
 A value of 0 disables, while a value 1 will do full GC in every compaction.
 Default is 20 (about once a week).`,
 		},
+		{
+			Name: "HotStoreMaxSpaceTarget",
+			Type: "uint64",
+
+			Comment: `HotStoreMaxSpaceTarget sets a target max disk size for the hotstore. Splitstore GC
+will run moving GC if disk utilization gets within a threshold (150 GB) of the target.
+Splitstore GC will NOT run moving GC if the total size of the move would get
+within 50 GB of the target, and instead will run a more aggressive online GC.
+If both HotStoreFullGCFrequency and HotStoreMaxSpaceTarget are set then splitstore
+GC will trigger moving GC if either configuration condition is met.
+A reasonable minimum is 2x fully GCed hotstore size + 50 G buffer.
+At this minimum size moving GC happens every time, any smaller and moving GC won't
+be able to run. In spring 2023 this minimum is ~550 GB.`,
+		},
+		{
+			Name: "HotStoreMaxSpaceThreshold",
+			Type: "uint64",
+
+			Comment: `When HotStoreMaxSpaceTarget is set Moving GC will be triggered when total moving size
+exceeds HotstoreMaxSpaceTarget - HotstoreMaxSpaceThreshold`,
+		},
+		{
+			Name: "HotstoreMaxSpaceSafetyBuffer",
+			Type: "uint64",
+
+			Comment: `Safety buffer to prevent moving GC from overflowing disk when HotStoreMaxSpaceTarget
+is set. Moving GC will not occur when total moving size exceeds
+HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer`,
+		},
 	},
 	"StorageMiner": []DocField{
 		{


@@ -601,6 +601,25 @@ type Splitstore struct {
 	// A value of 0 disables, while a value 1 will do full GC in every compaction.
 	// Default is 20 (about once a week).
 	HotStoreFullGCFrequency uint64
+	// HotStoreMaxSpaceTarget sets a target max disk size for the hotstore. Splitstore GC
+	// will run moving GC if disk utilization gets within a threshold (150 GB) of the target.
+	// Splitstore GC will NOT run moving GC if the total size of the move would get
+	// within 50 GB of the target, and instead will run a more aggressive online GC.
+	// If both HotStoreFullGCFrequency and HotStoreMaxSpaceTarget are set then splitstore
+	// GC will trigger moving GC if either configuration condition is met.
+	// A reasonable minimum is 2x fully GCed hotstore size + 50 G buffer.
+	// At this minimum size moving GC happens every time, any smaller and moving GC won't
+	// be able to run. In spring 2023 this minimum is ~550 GB.
+	HotStoreMaxSpaceTarget uint64
+
+	// When HotStoreMaxSpaceTarget is set Moving GC will be triggered when total moving size
+	// exceeds HotstoreMaxSpaceTarget - HotstoreMaxSpaceThreshold
+	HotStoreMaxSpaceThreshold uint64
+
+	// Safety buffer to prevent moving GC from overflowing disk when HotStoreMaxSpaceTarget
+	// is set. Moving GC will not occur when total moving size exceeds
+	// HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer
+	HotstoreMaxSpaceSafetyBuffer uint64
 }
 
 // // Full Node


@@ -193,25 +193,14 @@ func (a *ChainAPI) ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([]
 		return nil, nil
 	}
 
-	// TODO: need to get the number of messages better than this
-	pts, err := a.Chain.LoadTipSet(ctx, types.NewTipSetKey(b.Parents...))
+	receipts, err := a.Chain.ReadReceipts(ctx, b.ParentMessageReceipts)
 	if err != nil {
 		return nil, err
 	}
 
-	cm, err := a.Chain.MessagesForTipset(ctx, pts)
-	if err != nil {
-		return nil, err
-	}
-
-	var out []*types.MessageReceipt
-	for i := 0; i < len(cm); i++ {
-		r, err := a.Chain.GetParentReceipt(ctx, b, i)
-		if err != nil {
-			return nil, err
-		}
-
-		out = append(out, r)
+	out := make([]*types.MessageReceipt, len(receipts))
+	for i := range receipts {
+		out[i] = &receipts[i]
 	}
 
 	return out, nil


@@ -153,6 +153,8 @@ type EthAPI struct {
 	EthEventAPI
 }
 
+var ErrNullRound = errors.New("requested epoch was a null round")
+
 func (a *EthModule) StateNetworkName(ctx context.Context) (dtypes.NetworkName, error) {
 	return stmgr.GetNetworkName(ctx, a.StateManager, a.Chain.GetHeaviestTipSet().ParentState())
 }
@@ -231,15 +233,14 @@ func (a *EthModule) EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthH
 	return newEthBlockFromFilecoinTipSet(ctx, ts, fullTxInfo, a.Chain, a.StateAPI)
 }
 
-func (a *EthModule) parseBlkParam(ctx context.Context, blkParam string) (tipset *types.TipSet, err error) {
-	if blkParam == "earliest" {
-		return nil, fmt.Errorf("block param \"earliest\" is not supported")
+func (a *EthModule) parseBlkParam(ctx context.Context, blkParam string, strict bool) (tipset *types.TipSet, err error) {
+	switch blkParam {
+	case "earliest", "pending":
+		return nil, fmt.Errorf("block param %q is not supported", blkParam)
 	}
 
 	head := a.Chain.GetHeaviestTipSet()
 	switch blkParam {
-	case "pending":
-		return head, nil
 	case "latest":
 		parent, err := a.Chain.GetTipSetFromKey(ctx, head.Parents())
 		if err != nil {
@@ -252,16 +253,22 @@ func (a *EthModule) parseBlkParam(ctx context.Context, blkParam string) (tipset
 		if err != nil {
 			return nil, fmt.Errorf("cannot parse block number: %v", err)
 		}
-		ts, err := a.Chain.GetTipsetByHeight(ctx, abi.ChainEpoch(num), nil, false)
+		if abi.ChainEpoch(num) > head.Height()-1 {
+			return nil, fmt.Errorf("requested a future epoch (beyond 'latest')")
+		}
+		ts, err := a.ChainAPI.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(num), head.Key())
 		if err != nil {
 			return nil, fmt.Errorf("cannot get tipset at height: %v", num)
 		}
+		if strict && ts.Height() != abi.ChainEpoch(num) {
+			return nil, ErrNullRound
+		}
 		return ts, nil
 	}
 }
 
 func (a *EthModule) EthGetBlockByNumber(ctx context.Context, blkParam string, fullTxInfo bool) (ethtypes.EthBlock, error) {
-	ts, err := a.parseBlkParam(ctx, blkParam)
+	ts, err := a.parseBlkParam(ctx, blkParam, true)
 	if err != nil {
 		return ethtypes.EthBlock{}, err
 	}
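Because ErrNullRound is reserialized as a plain JSON-RPC error on the wire, remote callers cannot compare against the sentinel value and typically match on the message text instead, as the itest earlier in this diff does. A sketch of that client-side handling:

```
blk, err := client.EthGetBlockByNumber(ctx, "0x10", true)
if err != nil && strings.Contains(err.Error(), "null round") {
	// epoch 0x10 was a null round; skip it and try the next height
} else if err != nil {
	return err
}
_ = blk
```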
@@ -367,7 +374,7 @@ func (a *EthModule) EthGetTransactionCount(ctx context.Context, sender ethtypes.
 		return ethtypes.EthUint64(0), nil
 	}
 
-	ts, err := a.parseBlkParam(ctx, blkParam)
+	ts, err := a.parseBlkParam(ctx, blkParam, false)
 	if err != nil {
 		return ethtypes.EthUint64(0), xerrors.Errorf("cannot parse block param: %s", blkParam)
 	}
@@ -433,7 +440,7 @@ func (a *EthModule) EthGetTransactionReceipt(ctx context.Context, txHash ethtype
 		}
 	}
 
-	receipt, err := newEthTxReceipt(ctx, tx, msgLookup, replay, events, a.StateAPI)
+	receipt, err := newEthTxReceipt(ctx, tx, replay, events, a.StateAPI)
 	if err != nil {
 		return nil, nil
 	}
@@ -456,7 +463,7 @@ func (a *EthModule) EthGetCode(ctx context.Context, ethAddr ethtypes.EthAddress,
 		return nil, xerrors.Errorf("cannot get Filecoin address: %w", err)
 	}
 
-	ts, err := a.parseBlkParam(ctx, blkParam)
+	ts, err := a.parseBlkParam(ctx, blkParam, false)
 	if err != nil {
 		return nil, xerrors.Errorf("cannot parse block param: %s", blkParam)
 	}
@@ -535,7 +542,7 @@ func (a *EthModule) EthGetCode(ctx context.Context, ethAddr ethtypes.EthAddress,
 }
 
 func (a *EthModule) EthGetStorageAt(ctx context.Context, ethAddr ethtypes.EthAddress, position ethtypes.EthBytes, blkParam string) (ethtypes.EthBytes, error) {
-	ts, err := a.parseBlkParam(ctx, blkParam)
+	ts, err := a.parseBlkParam(ctx, blkParam, false)
 	if err != nil {
 		return nil, xerrors.Errorf("cannot parse block param: %s", blkParam)
 	}
@@ -631,7 +638,7 @@ func (a *EthModule) EthGetBalance(ctx context.Context, address ethtypes.EthAddre
 		return ethtypes.EthBigInt{}, err
 	}
 
-	ts, err := a.parseBlkParam(ctx, blkParam)
+	ts, err := a.parseBlkParam(ctx, blkParam, false)
 	if err != nil {
 		return ethtypes.EthBigInt{}, xerrors.Errorf("cannot parse block param: %s", blkParam)
 	}
@@ -676,57 +683,48 @@ func (a *EthModule) EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (eth
 		}
 	}
 
-	ts, err := a.parseBlkParam(ctx, params.NewestBlkNum)
+	ts, err := a.parseBlkParam(ctx, params.NewestBlkNum, false)
 	if err != nil {
 		return ethtypes.EthFeeHistory{}, fmt.Errorf("bad block parameter %s: %s", params.NewestBlkNum, err)
 	}
 
-	// Deal with the case that the chain is shorter than the number of requested blocks.
-	oldestBlkHeight := uint64(1)
-	if abi.ChainEpoch(params.BlkCount) <= ts.Height() {
-		oldestBlkHeight = uint64(ts.Height()) - uint64(params.BlkCount) + 1
-	}
+	var (
+		basefee         = ts.Blocks()[0].ParentBaseFee
+		oldestBlkHeight = uint64(1)
 
 		// NOTE: baseFeePerGas should include the next block after the newest of the returned range,
 		// because the next base fee can be inferred from the messages in the newest block.
 		// However, this is NOT the case in Filecoin due to deferred execution, so the best
 		// we can do is duplicate the last value.
-	baseFeeArray := []ethtypes.EthBigInt{ethtypes.EthBigInt(ts.Blocks()[0].ParentBaseFee)}
-	gasUsedRatioArray := []float64{}
-	rewardsArray := make([][]ethtypes.EthBigInt, 0)
+		baseFeeArray      = []ethtypes.EthBigInt{ethtypes.EthBigInt(basefee)}
+		rewardsArray      = make([][]ethtypes.EthBigInt, 0)
+		gasUsedRatioArray = []float64{}
+		blocksIncluded    int
+	)
 
-	for ts.Height() >= abi.ChainEpoch(oldestBlkHeight) {
-		// Unfortunately we need to rebuild the full message view so we can
-		// totalize gas used in the tipset.
-		msgs, err := a.Chain.MessagesForTipset(ctx, ts)
+	for blocksIncluded < int(params.BlkCount) && ts.Height() > 0 {
+		msgs, rcpts, err := messagesAndReceipts(ctx, ts, a.Chain, a.StateAPI)
 		if err != nil {
-			return ethtypes.EthFeeHistory{}, xerrors.Errorf("error loading messages for tipset: %v: %w", ts, err)
+			return ethtypes.EthFeeHistory{}, xerrors.Errorf("failed to retrieve messages and receipts for height %d: %w", ts.Height(), err)
 		}
 
 		txGasRewards := gasRewardSorter{}
-		for txIdx, msg := range msgs {
-			msgLookup, err := a.StateAPI.StateSearchMsg(ctx, types.EmptyTSK, msg.Cid(), api.LookbackNoLimit, false)
-			if err != nil || msgLookup == nil {
-				return ethtypes.EthFeeHistory{}, nil
-			}
-
-			tx, err := newEthTxFromMessageLookup(ctx, msgLookup, txIdx, a.Chain, a.StateAPI)
-			if err != nil {
-				return ethtypes.EthFeeHistory{}, nil
-			}
-
+		for i, msg := range msgs {
+			effectivePremium := msg.VMMessage().EffectiveGasPremium(basefee)
 			txGasRewards = append(txGasRewards, gasRewardTuple{
-				reward: tx.Reward(ts.Blocks()[0].ParentBaseFee),
-				gas:    uint64(msgLookup.Receipt.GasUsed),
+				premium: effectivePremium,
+				gasUsed: rcpts[i].GasUsed,
 			})
 		}
 
 		rewards, totalGasUsed := calculateRewardsAndGasUsed(rewardPercentiles, txGasRewards)
 
 		// arrays should be reversed at the end
-		baseFeeArray = append(baseFeeArray, ethtypes.EthBigInt(ts.Blocks()[0].ParentBaseFee))
+		baseFeeArray = append(baseFeeArray, ethtypes.EthBigInt(basefee))
 		gasUsedRatioArray = append(gasUsedRatioArray, float64(totalGasUsed)/float64(build.BlockGasLimit))
 		rewardsArray = append(rewardsArray, rewards)
+		oldestBlkHeight = uint64(ts.Height())
+		blocksIncluded++
 
 		parentTsKey := ts.Parents()
 		ts, err = a.Chain.LoadTipSet(ctx, parentTsKey)
@@ -1066,7 +1064,7 @@ func (a *EthModule) EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam s
 		return nil, xerrors.Errorf("failed to convert ethcall to filecoin message: %w", err)
 	}
 
-	ts, err := a.parseBlkParam(ctx, blkParam)
+	ts, err := a.parseBlkParam(ctx, blkParam, false)
 	if err != nil {
 		return nil, xerrors.Errorf("cannot parse block param: %s", blkParam)
 	}
@@ -1783,37 +1781,37 @@ func newEthBlockFromFilecoinTipSet(ctx context.Context, ts *types.TipSet, fullTx
 		return ethtypes.EthBlock{}, err
 	}
 
-	msgs, err := cs.MessagesForTipset(ctx, ts)
+	msgs, rcpts, err := messagesAndReceipts(ctx, ts, cs, sa)
 	if err != nil {
-		return ethtypes.EthBlock{}, xerrors.Errorf("error loading messages for tipset: %v: %w", ts, err)
+		return ethtypes.EthBlock{}, xerrors.Errorf("failed to retrieve messages and receipts: %w", err)
 	}
 
 	block := ethtypes.NewEthBlock(len(msgs) > 0)
 
 	gasUsed := int64(0)
-	compOutput, err := sa.StateCompute(ctx, ts.Height(), nil, ts.Key())
-	if err != nil {
-		return ethtypes.EthBlock{}, xerrors.Errorf("failed to compute state: %w", err)
-	}
-
-	for txIdx, msg := range compOutput.Trace {
-		// skip system messages like reward application and cron
-		if msg.Msg.From == builtintypes.SystemActorAddr {
-			continue
-		}
-
-		gasUsed += msg.MsgRct.GasUsed
-		smsgCid, err := getSignedMessage(ctx, cs, msg.MsgCid)
-		if err != nil {
-			return ethtypes.EthBlock{}, xerrors.Errorf("failed to get signed msg %s: %w", msg.MsgCid, err)
-		}
-		tx, err := newEthTxFromSignedMessage(ctx, smsgCid, sa)
+	for i, msg := range msgs {
+		rcpt := rcpts[i]
+		ti := ethtypes.EthUint64(i)
+		gasUsed += rcpt.GasUsed
+
+		var smsg *types.SignedMessage
+		switch msg := msg.(type) {
+		case *types.SignedMessage:
+			smsg = msg
+		case *types.Message:
+			smsg = &types.SignedMessage{
+				Message: *msg,
+				Signature: crypto.Signature{
+					Type: crypto.SigTypeBLS,
+				},
+			}
+		default:
+			return ethtypes.EthBlock{}, xerrors.Errorf("failed to get signed msg %s: %w", msg.Cid(), err)
+		}
+
+		tx, err := newEthTxFromSignedMessage(ctx, smsg, sa)
 		if err != nil {
 			return ethtypes.EthBlock{}, xerrors.Errorf("failed to convert msg to ethTx: %w", err)
 		}
 
-		ti := ethtypes.EthUint64(txIdx)
-
 		tx.ChainID = ethtypes.EthUint64(build.Eip155ChainId)
 		tx.BlockHash = &blkHash
 		tx.BlockNumber = &bn
@@ -1835,6 +1833,29 @@ func newEthBlockFromFilecoinTipSet(ctx context.Context, ts *types.TipSet, fullTx
 	return block, nil
 }
 
+func messagesAndReceipts(ctx context.Context, ts *types.TipSet, cs *store.ChainStore, sa StateAPI) ([]types.ChainMsg, []types.MessageReceipt, error) {
+	msgs, err := cs.MessagesForTipset(ctx, ts)
+	if err != nil {
+		return nil, nil, xerrors.Errorf("error loading messages for tipset: %v: %w", ts, err)
+	}
+
+	_, rcptRoot, err := sa.StateManager.TipSetState(ctx, ts)
+	if err != nil {
+		return nil, nil, xerrors.Errorf("failed to compute state: %w", err)
+	}
+
+	rcpts, err := cs.ReadReceipts(ctx, rcptRoot)
+	if err != nil {
+		return nil, nil, xerrors.Errorf("error loading receipts for tipset: %v: %w", ts, err)
+	}
+
+	if len(msgs) != len(rcpts) {
+		return nil, nil, xerrors.Errorf("receipts and message array lengths didn't match for tipset: %v: %w", ts, err)
+	}
+
+	return msgs, rcpts, nil
+}
+
 // lookupEthAddress makes its best effort at finding the Ethereum address for a
 // Filecoin address. It does the following:
 //
@@ -2030,7 +2051,7 @@ func newEthTxFromMessageLookup(ctx context.Context, msgLookup *api.MsgLookup, tx
 	return tx, nil
 }
 
-func newEthTxReceipt(ctx context.Context, tx ethtypes.EthTx, lookup *api.MsgLookup, replay *api.InvocResult, events []types.Event, sa StateAPI) (api.EthTxReceipt, error) {
+func newEthTxReceipt(ctx context.Context, tx ethtypes.EthTx, replay *api.InvocResult, events []types.Event, sa StateAPI) (api.EthTxReceipt, error) {
 	var (
 		transactionIndex ethtypes.EthUint64
 		blockHash        ethtypes.EthHash
@@ -2059,25 +2080,25 @@ func newEthTxReceipt(ctx context.Context, tx ethtypes.EthTx, lookup *api.MsgLook
 		LogsBloom:        ethtypes.EmptyEthBloom[:],
 	}
 
-	if lookup.Receipt.ExitCode.IsSuccess() {
+	if replay.MsgRct.ExitCode.IsSuccess() {
 		receipt.Status = 1
 	}
-	if lookup.Receipt.ExitCode.IsError() {
+	if replay.MsgRct.ExitCode.IsError() {
 		receipt.Status = 0
 	}
 
-	receipt.GasUsed = ethtypes.EthUint64(lookup.Receipt.GasUsed)
+	receipt.GasUsed = ethtypes.EthUint64(replay.MsgRct.GasUsed)
 
 	// TODO: handle CumulativeGasUsed
 	receipt.CumulativeGasUsed = ethtypes.EmptyEthInt
 
-	effectiveGasPrice := big.Div(replay.GasCost.TotalCost, big.NewInt(lookup.Receipt.GasUsed))
+	effectiveGasPrice := big.Div(replay.GasCost.TotalCost, big.NewInt(replay.MsgRct.GasUsed))
 	receipt.EffectiveGasPrice = ethtypes.EthBigInt(effectiveGasPrice)
 
-	if receipt.To == nil && lookup.Receipt.ExitCode.IsSuccess() {
+	if receipt.To == nil && replay.MsgRct.ExitCode.IsSuccess() {
 		// Create and Create2 return the same things.
 		var ret eam.CreateExternalReturn
-		if err := ret.UnmarshalCBOR(bytes.NewReader(lookup.Receipt.Return)); err != nil {
+		if err := ret.UnmarshalCBOR(bytes.NewReader(replay.MsgRct.Return)); err != nil {
 			return api.EthTxReceipt{}, xerrors.Errorf("failed to parse contract creation result: %w", err)
 		}
 		addr := ethtypes.EthAddress(ret.EthAddress)
@@ -2335,35 +2356,35 @@ func parseEthRevert(ret []byte) string {
 	return ethtypes.EthBytes(cbytes).String()
 }
 
-func calculateRewardsAndGasUsed(rewardPercentiles []float64, txGasRewards gasRewardSorter) ([]ethtypes.EthBigInt, uint64) {
-	var totalGasUsed uint64
+func calculateRewardsAndGasUsed(rewardPercentiles []float64, txGasRewards gasRewardSorter) ([]ethtypes.EthBigInt, int64) {
+	var gasUsedTotal int64
 	for _, tx := range txGasRewards {
-		totalGasUsed += tx.gas
+		gasUsedTotal += tx.gasUsed
 	}
 
 	rewards := make([]ethtypes.EthBigInt, len(rewardPercentiles))
 	for i := range rewards {
-		rewards[i] = ethtypes.EthBigIntZero
+		rewards[i] = ethtypes.EthBigInt(types.NewInt(MinGasPremium))
 	}
 
 	if len(txGasRewards) == 0 {
-		return rewards, totalGasUsed
+		return rewards, gasUsedTotal
 	}
 
 	sort.Stable(txGasRewards)
 
 	var idx int
-	var sum uint64
+	var sum int64
 	for i, percentile := range rewardPercentiles {
-		threshold := uint64(float64(totalGasUsed) * percentile / 100)
+		threshold := int64(float64(gasUsedTotal) * percentile / 100)
 		for sum < threshold && idx < len(txGasRewards)-1 {
-			sum += txGasRewards[idx].gas
+			sum += txGasRewards[idx].gasUsed
 			idx++
 		}
-		rewards[i] = txGasRewards[idx].reward
+		rewards[i] = ethtypes.EthBigInt(txGasRewards[idx].premium)
 	}
 
-	return rewards, totalGasUsed
+	return rewards, gasUsedTotal
 }
 
 func getSignedMessage(ctx context.Context, cs *store.ChainStore, msgCid cid.Cid) (*types.SignedMessage, error) {
@@ -2386,8 +2407,8 @@ func getSignedMessage(ctx context.Context, cs *store.ChainStore, msgCid cid.Cid)
 }
 
 type gasRewardTuple struct {
-	gas    uint64
-	reward ethtypes.EthBigInt
+	gasUsed int64
+	premium abi.TokenAmount
 }
 
 // sorted in ascending order
@@ -2398,5 +2419,5 @@ func (g gasRewardSorter) Swap(i, j int) {
 	g[i], g[j] = g[j], g[i]
 }
 func (g gasRewardSorter) Less(i, j int) bool {
-	return g[i].reward.Int.Cmp(g[j].reward.Int) == -1
+	return g[i].premium.Int.Cmp(g[j].premium.Int) == -1
 }


@@ -117,11 +117,8 @@ func TestReward(t *testing.T) {
 		{maxFeePerGas: big.NewInt(50), maxPriorityFeePerGas: big.NewInt(200), answer: big.NewInt(-50)},
 	}
 	for _, tc := range testcases {
-		tx := ethtypes.EthTx{
-			MaxFeePerGas:         ethtypes.EthBigInt(tc.maxFeePerGas),
-			MaxPriorityFeePerGas: ethtypes.EthBigInt(tc.maxPriorityFeePerGas),
-		}
-		reward := tx.Reward(baseFee)
+		msg := &types.Message{GasFeeCap: tc.maxFeePerGas, GasPremium: tc.maxPriorityFeePerGas}
+		reward := msg.EffectiveGasPremium(baseFee)
 		require.Equal(t, 0, reward.Int.Cmp(tc.answer.Int), reward, tc.answer)
 	}
 }
@@ -135,25 +132,25 @@ func TestRewardPercentiles(t *testing.T) {
 		{
 			percentiles:  []float64{25, 50, 75},
 			txGasRewards: []gasRewardTuple{},
-			answer:       []int64{0, 0, 0},
+			answer:       []int64{MinGasPremium, MinGasPremium, MinGasPremium},
 		},
 		{
 			percentiles: []float64{25, 50, 75, 100},
 			txGasRewards: []gasRewardTuple{
-				{gas: uint64(0), reward: ethtypes.EthBigInt(big.NewInt(300))},
-				{gas: uint64(100), reward: ethtypes.EthBigInt(big.NewInt(200))},
-				{gas: uint64(350), reward: ethtypes.EthBigInt(big.NewInt(100))},
-				{gas: uint64(500), reward: ethtypes.EthBigInt(big.NewInt(600))},
-				{gas: uint64(300), reward: ethtypes.EthBigInt(big.NewInt(700))},
+				{gasUsed: int64(0), premium: big.NewInt(300)},
+				{gasUsed: int64(100), premium: big.NewInt(200)},
+				{gasUsed: int64(350), premium: big.NewInt(100)},
+				{gasUsed: int64(500), premium: big.NewInt(600)},
+				{gasUsed: int64(300), premium: big.NewInt(700)},
 			},
 			answer: []int64{200, 700, 700, 700},
 		},
 	}
 	for _, tc := range testcases {
 		rewards, totalGasUsed := calculateRewardsAndGasUsed(tc.percentiles, tc.txGasRewards)
-		gasUsed := uint64(0)
+		var gasUsed int64
 		for _, tx := range tc.txGasRewards {
-			gasUsed += tx.gas
+			gasUsed += tx.gasUsed
 		}
 		ans := []ethtypes.EthBigInt{}
 		for _, bi := range tc.answer {


@@ -87,6 +87,9 @@ func SplitBlockstore(cfg *config.Chainstore) func(lc fx.Lifecycle, r repo.Locked
 			UniversalColdBlocks:      cfg.Splitstore.ColdStoreType == "universal",
 			HotStoreMessageRetention: cfg.Splitstore.HotStoreMessageRetention,
 			HotStoreFullGCFrequency:  cfg.Splitstore.HotStoreFullGCFrequency,
+			HotstoreMaxSpaceTarget:       cfg.Splitstore.HotStoreMaxSpaceTarget,
+			HotstoreMaxSpaceThreshold:    cfg.Splitstore.HotStoreMaxSpaceThreshold,
+			HotstoreMaxSpaceSafetyBuffer: cfg.Splitstore.HotstoreMaxSpaceSafetyBuffer,
 		}
 
 		ss, err := splitstore.Open(path, ds, hot, cold, cfg)
 		if err != nil {


@@ -123,7 +123,7 @@ func NetworkName(mctx helpers.MetricsCtx,
 	ctx := helpers.LifecycleCtx(mctx, lc)
 
-	sm, err := stmgr.NewStateManager(cs, tsexec, syscalls, us, nil)
+	sm, err := stmgr.NewStateManager(cs, tsexec, syscalls, us, nil, nil)
 	if err != nil {
 		return "", err
 	}

View File

@@ -7,10 +7,11 @@ import (
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/vm"
+	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

-func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule) (*stmgr.StateManager, error) {
-	sm, err := stmgr.NewStateManager(cs, exec, sys, us, b)
+func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule, metadataDs dtypes.MetadataDS) (*stmgr.StateManager, error) {
+	sm, err := stmgr.NewStateManager(cs, exec, sys, us, b, metadataDs)
	if err != nil {
		return nil, err
	}

View File

@@ -6,8 +6,10 @@ import (
	"bytes"
	"context"
	"encoding/json"
+	"errors"
	"fmt"
	"net/http"
+	"os"
	"reflect"
	"time"
@@ -19,9 +21,15 @@ import (
	"github.com/filecoin-project/lotus/api"
)

+var errSectorRemoved = errors.New("sector removed")
+
func (m *Sealing) Plan(events []statemachine.Event, user interface{}) (interface{}, uint64, error) {
	next, processed, err := m.plan(events, user.(*SectorInfo))
	if err != nil || next == nil {
+		if err == errSectorRemoved && os.Getenv("LOTUS_KEEP_REMOVED_FSM_ACTIVE") != "1" {
+			return nil, processed, statemachine.ErrTerminated
+		}
		l := Log{
			Timestamp: uint64(time.Now().Unix()),
			Message:   fmt.Sprintf("state machine error: %s", err),
@@ -601,7 +609,7 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
	case Removing:
		return m.handleRemoving, processed, nil
	case Removed:
-		return nil, processed, nil
+		return nil, processed, errSectorRemoved

	case RemoveFailed:
		return m.handleRemoveFailed, processed, nil
@@ -615,13 +623,14 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta
	// Fatal errors
	case UndefinedSectorState:
		log.Error("sector update with undefined state!")
+		return nil, processed, xerrors.Errorf("sector update with undefined state")
	case FailedUnrecoverable:
		log.Errorf("sector %d failed unrecoverably", state.SectorNumber)
+		return nil, processed, xerrors.Errorf("sector %d failed unrecoverably", state.SectorNumber)
	default:
		log.Errorf("unexpected sector update state: %s", state.State)
+		return nil, processed, xerrors.Errorf("unexpected sector update state: %s", state.State)
	}
-	return nil, processed, nil
}

func (m *Sealing) onUpdateSector(ctx context.Context, state *SectorInfo) error {
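The net effect: a sector that reaches the Removed state now surfaces errSectorRemoved from the planner, and Plan converts that into statemachine.ErrTerminated so the state machine shuts down instead of idling, unless the operator opts out with LOTUS_KEEP_REMOVED_FSM_ACTIVE=1. A minimal, self-contained sketch of the same guard pattern; the planner and errTerminated below are stand-ins, not lotus code:

package main

import (
	"errors"
	"fmt"
	"os"
)

var (
	errSectorRemoved = errors.New("sector removed")
	errTerminated    = errors.New("state machine terminated")
)

// plan stands in for the sealing planner: removed sectors surface a
// sentinel error instead of silently returning nil.
func plan(state string) error {
	if state == "Removed" {
		return errSectorRemoved
	}
	return nil
}

// step terminates the FSM for removed sectors unless the operator opted
// out via LOTUS_KEEP_REMOVED_FSM_ACTIVE=1.
func step(state string) error {
	err := plan(state)
	if err == errSectorRemoved && os.Getenv("LOTUS_KEEP_REMOVED_FSM_ACTIVE") != "1" {
		return errTerminated
	}
	return err
}

func main() {
	fmt.Println(step("Removed")) // state machine terminated
}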

View File

@@ -289,14 +289,20 @@ func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	m.remoteHnd.ServeHTTP(w, r)
}

-func schedNop(context.Context, Worker) error {
+var schedNop = PrepareAction{
+	Action: func(ctx context.Context, w Worker) error {
		return nil
+	},
+	PrepType: sealtasks.TTNoop,
}

-func (m *Manager) schedFetch(sector storiface.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) func(context.Context, Worker) error {
-	return func(ctx context.Context, worker Worker) error {
+func (m *Manager) schedFetch(sector storiface.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) PrepareAction {
+	return PrepareAction{
+		Action: func(ctx context.Context, worker Worker) error {
			_, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, ft, ptype, am))
			return err
+		},
+		PrepType: sealtasks.TTFetch,
	}
}
@@ -315,7 +321,8 @@ func (m *Manager) SectorsUnsealPiece(ctx context.Context, sector storiface.Secto
	// if the selected worker does NOT have the sealed files for the sector, instruct it to fetch it from a worker that has them and
	// put it in the sealing scratch space.
-	sealFetch := func(ctx context.Context, worker Worker) error {
+	sealFetch := PrepareAction{
+		Action: func(ctx context.Context, worker Worker) error {
			log.Debugf("copy sealed/cache sector data for sector %d", sector.ID)
			_, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy))
			_, err2 := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing, storiface.AcquireCopy))
@@ -325,6 +332,8 @@ func (m *Manager) SectorsUnsealPiece(ctx context.Context, sector storiface.Secto
			}

			return nil
+		},
+		PrepType: sealtasks.TTFetch,
	}

	if unsealed == nil {

View File

@@ -42,6 +42,10 @@ func WithPriority(ctx context.Context, priority int) context.Context {
const mib = 1 << 20

type WorkerAction func(ctx context.Context, w Worker) error

+type PrepareAction struct {
+	Action   WorkerAction
+	PrepType sealtasks.TaskType
+}

type SchedWorker interface {
	TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error)
@@ -130,7 +134,7 @@ type WorkerRequest struct {
	Sel     WorkerSelector
	SchedId uuid.UUID

-	prepare WorkerAction
+	prepare PrepareAction
	work    WorkerAction

	start time.Time
@@ -157,7 +161,15 @@ func newScheduler(ctx context.Context, assigner string) (*Scheduler, error) {
	case "", "utilization":
		a = NewLowestUtilizationAssigner()
	case "spread":
-		a = NewSpreadAssigner()
+		a = NewSpreadAssigner(false)
+	case "experiment-spread-qcount":
+		a = NewSpreadAssigner(true)
+	case "experiment-spread-tasks":
+		a = NewSpreadTasksAssigner(false)
+	case "experiment-spread-tasks-qcount":
+		a = NewSpreadTasksAssigner(true)
+	case "experiment-random":
+		a = NewRandomAssigner()
	default:
		return nil, xerrors.Errorf("unknown assigner '%s'", assigner)
	}
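The experiment-* names are variations on the existing strategies: experiment-spread-qcount seeds the spread counters with each worker's already-queued work, experiment-spread-tasks spreads per (worker, task type) pair instead of per worker, experiment-spread-tasks-qcount combines both, and experiment-random picks uniformly among all windows that can take the task; the new assigners are defined in the files below.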
@@ -189,7 +201,7 @@ func newScheduler(ctx context.Context, assigner string) (*Scheduler, error) {
	}, nil
}

-func (sh *Scheduler) Schedule(ctx context.Context, sector storiface.SectorRef, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error {
+func (sh *Scheduler) Schedule(ctx context.Context, sector storiface.SectorRef, taskType sealtasks.TaskType, sel WorkerSelector, prepare PrepareAction, work WorkerAction) error {
	ret := make(chan workerResponse)

	select {
@@ -239,6 +251,13 @@ func (r *WorkerRequest) SealTask() sealtasks.SealTaskType {
	}
}

+func (r *WorkerRequest) PrepSealTask() sealtasks.SealTaskType {
+	return sealtasks.SealTaskType{
+		TaskType:            r.prepare.PrepType,
+		RegisteredSealProof: r.Sector.ProofType,
+	}
+}

type SchedDiagRequestInfo struct {
	Sector   abi.SectorID
	TaskType sealtasks.TaskType
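PrepareAction couples the prepare callback with its own task type so the scheduler can meter prepare-stage concurrency (via PrepSealTask and PrepResourceSpec) separately from the main task. A simplified stand-in sketch of the idea; these types are illustrative, not the lotus API:

package main

import (
	"context"
	"fmt"
)

type TaskType string

const TTFetch TaskType = "seal/v0/fetch"

type WorkerAction func(ctx context.Context) error

// PrepareAction tags a prepare callback with a task type, so its
// concurrency can be accounted independently of the main task's.
type PrepareAction struct {
	Action   WorkerAction
	PrepType TaskType
}

// schedule runs the tagged prepare step before the main work; a real
// scheduler would first check per-task-type limits for prepare.PrepType.
func schedule(ctx context.Context, prepare PrepareAction, work WorkerAction) error {
	if prepare.Action != nil {
		fmt.Printf("running prepare step as %q\n", prepare.PrepType)
		if err := prepare.Action(ctx); err != nil {
			return err
		}
	}
	return work(ctx)
}

func main() {
	fetch := PrepareAction{
		Action:   func(ctx context.Context) error { fmt.Println("fetching sector data"); return nil },
		PrepType: TTFetch,
	}
	_ = schedule(context.Background(), fetch, func(ctx context.Context) error {
		fmt.Println("unsealing")
		return nil
	})
}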

View File

@@ -58,7 +58,7 @@ func (a *AssignerCommon) TrySched(sh *Scheduler) {
	windows := make([]SchedWindow, windowsLen)
	for i := range windows {
-		windows[i].Allocated = *NewActiveResources()
+		windows[i].Allocated = *NewActiveResources(newTaskCounter())
	}

	acceptableWindows := make([][]int, queueLen) // QueueIndex -> []OpenWindowIndex

View File

@@ -0,0 +1,88 @@
package sealer

import (
	"math/rand"

	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
func NewRandomAssigner() Assigner {
return &AssignerCommon{
WindowSel: RandomWS,
}
}
func RandomWS(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int {
scheduled := 0
rmQueue := make([]int, 0, queueLen)
for sqi := 0; sqi < queueLen; sqi++ {
task := (*sh.SchedQueue)[sqi]
//bestAssigned := math.MaxInt // smaller = better
type choice struct {
selectedWindow int
needRes storiface.Resources
info storiface.WorkerInfo
bestWid storiface.WorkerID
}
choices := make([]choice, 0, len(acceptableWindows[task.IndexHeap]))
for i, wnd := range acceptableWindows[task.IndexHeap] {
wid := sh.OpenWindows[wnd].Worker
w := sh.Workers[wid]
res := w.Info.Resources.ResourceSpec(task.Sector.ProofType, task.TaskType)
log.Debugf("SCHED try assign sqi:%d sector %d to window %d (awi:%d)", sqi, task.Sector.ID.Number, wnd, i)
if !windows[wnd].Allocated.CanHandleRequest(task.SealTask(), res, wid, "schedAssign", w.Info) {
continue
}
choices = append(choices, choice{
selectedWindow: wnd,
needRes: res,
info: w.Info,
bestWid: wid,
})
}
if len(choices) == 0 {
// all windows full
continue
}
// choose randomly
randIndex := rand.Intn(len(choices))
selectedWindow := choices[randIndex].selectedWindow
needRes := choices[randIndex].needRes
info := choices[randIndex].info
bestWid := choices[randIndex].bestWid
log.Debugw("SCHED ASSIGNED",
"assigner", "darts",
"sqi", sqi,
"sector", task.Sector.ID.Number,
"task", task.TaskType,
"window", selectedWindow,
"worker", bestWid,
"choices", len(choices))
windows[selectedWindow].Allocated.Add(task.SealTask(), info.Resources, needRes)
windows[selectedWindow].Todo = append(windows[selectedWindow].Todo, task)
rmQueue = append(rmQueue, sqi)
scheduled++
}
if len(rmQueue) > 0 {
for i := len(rmQueue) - 1; i >= 0; i-- {
sh.SchedQueue.Remove(rmQueue[i])
}
}
return scheduled
}
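Unlike the spread and utilization assigners, which greedily track a single best candidate, RandomWS first collects every window that can currently handle the request and then draws one uniformly at random; note that the SCHED ASSIGNED log line tags this assigner as "darts" even though it is registered as "experiment-random".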

View File

@@ -6,13 +6,14 @@ import (
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

-func NewSpreadAssigner() Assigner {
+func NewSpreadAssigner(queued bool) Assigner {
	return &AssignerCommon{
-		WindowSel: SpreadWS,
+		WindowSel: SpreadWS(queued),
	}
}

-func SpreadWS(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int {
+func SpreadWS(queued bool) func(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int {
+	return func(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int {
		scheduled := 0
		rmQueue := make([]int, 0, queueLen)
		workerAssigned := map[storiface.WorkerID]int{}
@@ -38,7 +39,11 @@ func SpreadWS(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []
				continue
			}

-			wu, _ := workerAssigned[wid]
+			wu, found := workerAssigned[wid]
+			if !found && queued {
+				wu = w.TaskCounts()
+				workerAssigned[wid] = wu
+			}
			if wu >= bestAssigned {
				continue
			}
@@ -56,6 +61,8 @@ func SpreadWS(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []
			}

			log.Debugw("SCHED ASSIGNED",
+				"assigner", "spread",
+				"spread-queued", queued,
				"sqi", sqi,
				"sector", task.Sector.ID.Number,
				"task", task.TaskType,
@@ -79,3 +86,4 @@ func SpreadWS(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []
		return scheduled
	}
+}
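When queued is true (the experiment-spread-qcount variant), the per-worker counter is seeded with the worker's existing task count the first time that worker is considered, so spreading accounts for work that is already running or queued on the worker rather than only for assignments made in this scheduling pass.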

View File

@@ -0,0 +1,98 @@
package sealer

import (
	"math"

	"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
func NewSpreadTasksAssigner(queued bool) Assigner {
return &AssignerCommon{
WindowSel: SpreadTasksWS(queued),
}
}
type widTask struct {
wid storiface.WorkerID
tt sealtasks.TaskType
}
func SpreadTasksWS(queued bool) func(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int {
return func(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int {
scheduled := 0
rmQueue := make([]int, 0, queueLen)
workerAssigned := map[widTask]int{}
for sqi := 0; sqi < queueLen; sqi++ {
task := (*sh.SchedQueue)[sqi]
selectedWindow := -1
var needRes storiface.Resources
var info storiface.WorkerInfo
var bestWid widTask
bestAssigned := math.MaxInt // smaller = better
for i, wnd := range acceptableWindows[task.IndexHeap] {
wid := sh.OpenWindows[wnd].Worker
w := sh.Workers[wid]
res := w.Info.Resources.ResourceSpec(task.Sector.ProofType, task.TaskType)
log.Debugf("SCHED try assign sqi:%d sector %d to window %d (awi:%d)", sqi, task.Sector.ID.Number, wnd, i)
if !windows[wnd].Allocated.CanHandleRequest(task.SealTask(), res, wid, "schedAssign", w.Info) {
continue
}
wt := widTask{wid: wid, tt: task.TaskType}
wu, found := workerAssigned[wt]
if !found && queued {
st := task.SealTask()
wu = w.TaskCount(&st)
workerAssigned[wt] = wu
}
if wu >= bestAssigned {
continue
}
info = w.Info
needRes = res
bestWid = wt
selectedWindow = wnd
bestAssigned = wu
}
if selectedWindow < 0 {
// all windows full
continue
}
log.Debugw("SCHED ASSIGNED",
"assigner", "spread-tasks",
"spread-queued", queued,
"sqi", sqi,
"sector", task.Sector.ID.Number,
"task", task.TaskType,
"window", selectedWindow,
"worker", bestWid,
"assigned", bestAssigned)
workerAssigned[bestWid]++
windows[selectedWindow].Allocated.Add(task.SealTask(), info.Resources, needRes)
windows[selectedWindow].Todo = append(windows[selectedWindow].Todo, task)
rmQueue = append(rmQueue, sqi)
scheduled++
}
if len(rmQueue) > 0 {
for i := len(rmQueue) - 1; i >= 0; i-- {
sh.SchedQueue.Remove(rmQueue[i])
}
}
return scheduled
}
}

View File

@@ -74,6 +74,7 @@ func LowestUtilizationWS(sh *Scheduler, queueLen int, acceptableWindows [][]int,
		}

		log.Debugw("SCHED ASSIGNED",
+			"assigner", "util",
			"sqi", sqi,
			"sector", task.Sector.ID.Number,
			"task", task.TaskType,

View File

@@ -13,18 +13,68 @@ type ActiveResources struct {
	gpuUsed float64
	cpuUse  uint64

-	taskCounters map[sealtasks.SealTaskType]int
+	taskCounters *taskCounter

	cond    *sync.Cond
	waiting int
}

-func NewActiveResources() *ActiveResources {
-	return &ActiveResources{
+type taskCounter struct {
+	taskCounters map[sealtasks.SealTaskType]int
+
+	// this lock is technically redundant, as ActiveResources is always accessed
+	// with the worker lock, but let's not panic if we ever change that
+	lk sync.Mutex
+}
+
+func newTaskCounter() *taskCounter {
+	return &taskCounter{
		taskCounters: map[sealtasks.SealTaskType]int{},
	}
}

+func (tc *taskCounter) Add(tt sealtasks.SealTaskType) {
+	tc.lk.Lock()
+	defer tc.lk.Unlock()
+	tc.taskCounters[tt]++
+}
+
+func (tc *taskCounter) Free(tt sealtasks.SealTaskType) {
+	tc.lk.Lock()
+	defer tc.lk.Unlock()
+	tc.taskCounters[tt]--
+}
+
+func (tc *taskCounter) Get(tt sealtasks.SealTaskType) int {
+	tc.lk.Lock()
+	defer tc.lk.Unlock()
+	return tc.taskCounters[tt]
+}
+
+func (tc *taskCounter) Sum() int {
+	tc.lk.Lock()
+	defer tc.lk.Unlock()
+	sum := 0
+	for _, v := range tc.taskCounters {
+		sum += v
+	}
+	return sum
+}
+
+func (tc *taskCounter) ForEach(cb func(tt sealtasks.SealTaskType, count int)) {
+	tc.lk.Lock()
+	defer tc.lk.Unlock()
+	for tt, count := range tc.taskCounters {
+		cb(tt, count)
+	}
+}
+
+func NewActiveResources(tc *taskCounter) *ActiveResources {
+	return &ActiveResources{
+		taskCounters: tc,
+	}
+}
+
func (a *ActiveResources) withResources(id storiface.WorkerID, wr storiface.WorkerInfo, tt sealtasks.SealTaskType, r storiface.Resources, locker sync.Locker, cb func() error) error {
	for !a.CanHandleRequest(tt, r, id, "withResources", wr) {
		if a.cond == nil {
@@ -59,7 +109,7 @@ func (a *ActiveResources) Add(tt sealtasks.SealTaskType, wr storiface.WorkerReso
	a.cpuUse += r.Threads(wr.CPUs, len(wr.GPUs))
	a.memUsedMin += r.MinMemory
	a.memUsedMax += r.MaxMemory
-	a.taskCounters[tt]++
+	a.taskCounters.Add(tt)

	return a.utilization(wr) - startUtil
}
@@ -71,7 +121,7 @@ func (a *ActiveResources) Free(tt sealtasks.SealTaskType, wr storiface.WorkerRes
	a.cpuUse -= r.Threads(wr.CPUs, len(wr.GPUs))
	a.memUsedMin -= r.MinMemory
	a.memUsedMax -= r.MaxMemory
-	a.taskCounters[tt]--
+	a.taskCounters.Free(tt)

	if a.cond != nil {
		a.cond.Broadcast()
@@ -82,8 +132,8 @@ func (a *ActiveResources) Free(tt sealtasks.SealTaskType, wr storiface.WorkerRes
// handle the request.
func (a *ActiveResources) CanHandleRequest(tt sealtasks.SealTaskType, needRes storiface.Resources, wid storiface.WorkerID, caller string, info storiface.WorkerInfo) bool {
	if needRes.MaxConcurrent > 0 {
-		if a.taskCounters[tt] >= needRes.MaxConcurrent {
-			log.Debugf("sched: not scheduling on worker %s for %s; at task limit tt=%s, curcount=%d", wid, caller, tt, a.taskCounters[tt])
+		if a.taskCounters.Get(tt) >= needRes.MaxConcurrent {
+			log.Debugf("sched: not scheduling on worker %s for %s; at task limit tt=%s, curcount=%d", wid, caller, tt, a.taskCounters.Get(tt))
			return false
		}
	}
@@ -170,6 +220,15 @@ func (a *ActiveResources) utilization(wr storiface.WorkerResources) float64 { //
	return max
}

+func (a *ActiveResources) taskCount(tt *sealtasks.SealTaskType) int {
+	// nil means all tasks
+	if tt == nil {
+		return a.taskCounters.Sum()
+	}
+
+	return a.taskCounters.Get(*tt)
+}
+
func (wh *WorkerHandle) Utilization() float64 {
	wh.lk.Lock()
	u := wh.active.utilization(wh.Info.Resources)
@@ -183,3 +242,31 @@ func (wh *WorkerHandle) Utilization() float64 {
	return u
}

+func (wh *WorkerHandle) TaskCounts() int {
+	wh.lk.Lock()
+	u := wh.active.taskCount(nil)
+	u += wh.preparing.taskCount(nil)
+	wh.lk.Unlock()
+
+	wh.wndLk.Lock()
+	for _, window := range wh.activeWindows {
+		u += window.Allocated.taskCount(nil)
+	}
+	wh.wndLk.Unlock()
+
+	return u
+}
+
+func (wh *WorkerHandle) TaskCount(tt *sealtasks.SealTaskType) int {
+	wh.lk.Lock()
+	u := wh.active.taskCount(tt)
+	u += wh.preparing.taskCount(tt)
+	wh.lk.Unlock()
+
+	wh.wndLk.Lock()
+	for _, window := range wh.activeWindows {
+		u += window.Allocated.taskCount(tt)
+	}
+	wh.wndLk.Unlock()
+
+	return u
+}
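Sharing a single taskCounter between a worker's preparing and active ActiveResources (as newWorkerHandle does below) is what makes MaxConcurrent a per-worker limit across both stages. A minimal sketch of that sharing; counter and tracker are simplified stand-ins for taskCounter and ActiveResources:

package main

import (
	"fmt"
	"sync"
)

type counter struct {
	lk sync.Mutex
	n  map[string]int
}

func (c *counter) add(tt string)     { c.lk.Lock(); defer c.lk.Unlock(); c.n[tt]++ }
func (c *counter) get(tt string) int { c.lk.Lock(); defer c.lk.Unlock(); return c.n[tt] }

// tracker stands in for ActiveResources: it delegates task counting to a
// counter that may be shared with other trackers.
type tracker struct{ counters *counter }

func main() {
	shared := &counter{n: map[string]int{}}
	preparing := tracker{counters: shared}
	active := tracker{counters: shared}

	preparing.counters.add("seal/v0/fetch")
	active.counters.add("seal/v0/fetch")

	// Both stages feed the same count, so a MaxConcurrent check against
	// either tracker sees 2 rather than 1.
	fmt.Println(active.counters.get("seal/v0/fetch"))
}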

View File

@@ -288,7 +288,8 @@ func TestSched(t *testing.T) {
		ProofType: spt,
	}

-	err := sched.Schedule(ctx, sectorRef, taskType, sel, func(ctx context.Context, w Worker) error {
+	prep := PrepareAction{
+		Action: func(ctx context.Context, w Worker) error {
			wi, err := w.Info(ctx)
			require.NoError(t, err)
@@ -306,7 +307,11 @@ func TestSched(t *testing.T) {
			log.Info("OUT ", taskName)

			return nil
-	}, noopAction)
+		},
+		PrepType: taskType,
+	}
+
+	err := sched.Schedule(ctx, sectorRef, taskType, sel, prep, noopAction)
	if err != context.Canceled {
		require.NoError(t, err, fmt.Sprint(l, l2))
	}
@@ -639,8 +644,8 @@ func BenchmarkTrySched(b *testing.B) {
			Resources: decentWorkerResources,
		},
		Enabled:   true,
-		preparing: NewActiveResources(),
-		active:    NewActiveResources(),
+		preparing: NewActiveResources(newTaskCounter()),
+		active:    NewActiveResources(newTaskCounter()),
	}

	for i := 0; i < windows; i++ {
@@ -685,7 +690,7 @@ func TestWindowCompact(t *testing.T) {
		for _, windowTasks := range start {
			window := &SchedWindow{
-				Allocated: *NewActiveResources(),
+				Allocated: *NewActiveResources(newTaskCounter()),
			}

			for _, task := range windowTasks {
@@ -708,7 +713,7 @@ func TestWindowCompact(t *testing.T) {
		require.Equal(t, len(start)-len(expect), -sw.windowsRequested)

		for wi, tasks := range expect {
-			expectRes := NewActiveResources()
+			expectRes := NewActiveResources(newTaskCounter())

			for ti, task := range tasks {
				require.Equal(t, task, wh.activeWindows[wi].Todo[ti].TaskType, "%d, %d", wi, ti)

View File

@@ -30,12 +30,14 @@ func newWorkerHandle(ctx context.Context, w Worker) (*WorkerHandle, error) {
		return nil, xerrors.Errorf("getting worker info: %w", err)
	}

+	tc := newTaskCounter()
+
	worker := &WorkerHandle{
		workerRpc: w,
		Info:      info,

-		preparing: NewActiveResources(),
-		active:    NewActiveResources(),
+		preparing: NewActiveResources(tc),
+		active:    NewActiveResources(tc),
		Enabled:   true,

		closingMgr: make(chan struct{}),
@@ -352,8 +354,8 @@ assignLoop:
			worker.lk.Lock()
			for t, todo := range firstWindow.Todo {
-				needRes := worker.Info.Resources.ResourceSpec(todo.Sector.ProofType, todo.TaskType)
-				if worker.preparing.CanHandleRequest(todo.SealTask(), needRes, sw.wid, "startPreparing", worker.Info) {
+				needResPrep := worker.Info.Resources.PrepResourceSpec(todo.Sector.ProofType, todo.TaskType, todo.prepare.PrepType)
+				if worker.preparing.CanHandleRequest(todo.PrepSealTask(), needResPrep, sw.wid, "startPreparing", worker.Info) {
					tidx = t
					break
				}
@@ -452,20 +454,21 @@ func (sw *schedWorker) startProcessingTask(req *WorkerRequest) error {
	w, sh := sw.worker, sw.sched

	needRes := w.Info.Resources.ResourceSpec(req.Sector.ProofType, req.TaskType)
+	needResPrep := w.Info.Resources.PrepResourceSpec(req.Sector.ProofType, req.TaskType, req.prepare.PrepType)

	w.lk.Lock()
-	w.preparing.Add(req.SealTask(), w.Info.Resources, needRes)
+	w.preparing.Add(req.PrepSealTask(), w.Info.Resources, needResPrep)
	w.lk.Unlock()

	go func() {
		// first run the prepare step (e.g. fetching sector data from other worker)
		tw := sh.workTracker.worker(sw.wid, w.Info, w.workerRpc)
		tw.start()
-		err := req.prepare(req.Ctx, tw)
+		err := req.prepare.Action(req.Ctx, tw)
		w.lk.Lock()

		if err != nil {
-			w.preparing.Free(req.SealTask(), w.Info.Resources, needRes)
+			w.preparing.Free(req.PrepSealTask(), w.Info.Resources, needResPrep)
			w.lk.Unlock()

			select {
@@ -495,7 +498,7 @@ func (sw *schedWorker) startProcessingTask(req *WorkerRequest) error {
	// wait (if needed) for resources in the 'active' window
	err = w.active.withResources(sw.wid, w.Info, req.SealTask(), needRes, &w.lk, func() error {
-		w.preparing.Free(req.SealTask(), w.Info.Resources, needRes)
+		w.preparing.Free(req.PrepSealTask(), w.Info.Resources, needResPrep)
		w.lk.Unlock()
		defer w.lk.Lock() // we MUST return locked from this function

View File

@@ -36,6 +36,8 @@ const (
	TTGenerateWindowPoSt  TaskType = "post/v0/windowproof"
	TTGenerateWinningPoSt TaskType = "post/v0/winningproof"

+	TTNoop TaskType = ""
)

var order = map[TaskType]int{

View File

@@ -43,9 +43,9 @@ func (m *Manager) WorkerStats(ctx context.Context) map[uuid.UUID]storiface.Worke
			TaskCounts: map[string]int{},
		}

-		for tt, count := range handle.active.taskCounters {
+		handle.active.taskCounters.ForEach(func(tt sealtasks.SealTaskType, count int) {
			out[uuid.UUID(id)].TaskCounts[tt.String()] = count
-		}
+		})

		handle.lk.Unlock()
	}

View File

@@ -65,6 +65,20 @@ func (wr WorkerResources) ResourceSpec(spt abi.RegisteredSealProof, tt sealtasks
	return res
}

+// PrepResourceSpec is like ResourceSpec, but meant for use limiting parallel preparing
+// tasks.
+func (wr WorkerResources) PrepResourceSpec(spt abi.RegisteredSealProof, tt, prepTT sealtasks.TaskType) Resources {
+	res := wr.ResourceSpec(spt, tt)
+
+	if prepTT != tt && prepTT != sealtasks.TTNoop {
+		prepRes := wr.ResourceSpec(spt, prepTT)
+		res.MaxConcurrent = prepRes.MaxConcurrent
+	}
+
+	// otherwise, use the default resource table
+
+	return res
+}

type WorkerStats struct {
	Info  WorkerInfo
	Tasks []sealtasks.TaskType
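In effect, the prepare stage keeps the main task's resource envelope but swaps in the prepare task type's MaxConcurrent cap. A stand-in sketch with an illustrative resource table (the names and numbers below are not the lotus defaults):

package main

import "fmt"

type Resources struct {
	MinMemory     uint64
	MaxConcurrent int
}

// table is an illustrative stand-in for the worker resource table.
var table = map[string]Resources{
	"seal/v0/unseal": {MinMemory: 8 << 30, MaxConcurrent: 0},
	"seal/v0/fetch":  {MinMemory: 1 << 20, MaxConcurrent: 5},
}

// prepResourceSpec mirrors the rule above: inherit the main task's
// resources, but cap concurrency by the prepare task type.
func prepResourceSpec(tt, prepTT string) Resources {
	res := table[tt]
	if prepTT != tt && prepTT != "" {
		res.MaxConcurrent = table[prepTT].MaxConcurrent
	}
	return res
}

func main() {
	r := prepResourceSpec("seal/v0/unseal", "seal/v0/fetch")
	fmt.Printf("prep needs %d bytes of memory, at most %d concurrent\n", r.MinMemory, r.MaxConcurrent)
}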