Merge branch 'master' into raulk/fix-eth-parseBlkParam

Raúl Kripalani 2023-03-09 12:16:23 +00:00
commit 941ef84b71
27 changed files with 774 additions and 152 deletions

View File

@ -15,6 +15,7 @@ import (
apitypes "github.com/filecoin-project/lotus/api/types" apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/chain/types/ethtypes"
"github.com/filecoin-project/lotus/node/modules/dtypes"
) )
// MODIFYING THE API INTERFACE // MODIFYING THE API INTERFACE
@ -25,7 +26,7 @@ import (
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Run `make clean && make deps && make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
@ -47,15 +48,18 @@ type Gateway interface {
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
ChainGetGenesis(context.Context) (*types.TipSet, error)
GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error)
MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error)
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
MsigGetVestingSchedule(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MsigVesting, error)
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*InvocResult, error)
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) //perm:read
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error)
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error)
@ -63,6 +67,7 @@ type Gateway interface {
StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (MinerInfo, error)
StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
StateNetworkName(context.Context) (dtypes.NetworkName, error)
StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error)
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)

View File

@ -724,6 +724,8 @@ type GatewayMethods struct {
GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``
MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) ``
MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) ``
MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) ``
@ -740,8 +742,12 @@ type GatewayMethods struct {
StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``
StateCall func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*InvocResult, error) ``
StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) ``
StateDecodeParams func(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) ``
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) ``
StateListMiners func(p0 context.Context, p1 types.TipSetKey) ([]address.Address, error) ``
@ -758,9 +764,11 @@ type GatewayMethods struct {
StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) ``
StateNetworkName func(p0 context.Context) (dtypes.NetworkName, error) ``
StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) ``
StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) `perm:"read"`
StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) ``
StateSearchMsg func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) ``
@ -4589,6 +4597,17 @@ func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Messag
return nil, ErrNotSupported
}
func (s *GatewayStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
if s.Internal.MpoolGetNonce == nil {
return 0, ErrNotSupported
}
return s.Internal.MpoolGetNonce(p0, p1)
}
func (s *GatewayStub) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
return 0, ErrNotSupported
}
func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
if s.Internal.MpoolPush == nil {
return *new(cid.Cid), ErrNotSupported
@ -4677,6 +4696,17 @@ func (s *GatewayStub) StateAccountKey(p0 context.Context, p1 address.Address, p2
return *new(address.Address), ErrNotSupported
}
func (s *GatewayStruct) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*InvocResult, error) {
if s.Internal.StateCall == nil {
return nil, ErrNotSupported
}
return s.Internal.StateCall(p0, p1, p2)
}
func (s *GatewayStub) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*InvocResult, error) {
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (DealCollateralBounds, error) {
if s.Internal.StateDealProviderCollateralBounds == nil {
return *new(DealCollateralBounds), ErrNotSupported
@ -4688,6 +4718,17 @@ func (s *GatewayStub) StateDealProviderCollateralBounds(p0 context.Context, p1 a
return *new(DealCollateralBounds), ErrNotSupported
}
func (s *GatewayStruct) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) {
if s.Internal.StateDecodeParams == nil {
return nil, ErrNotSupported
}
return s.Internal.StateDecodeParams(p0, p1, p2, p3, p4)
}
func (s *GatewayStub) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) {
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) {
if s.Internal.StateGetActor == nil {
return nil, ErrNotSupported
@ -4776,6 +4817,17 @@ func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.A
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
if s.Internal.StateNetworkName == nil {
return *new(dtypes.NetworkName), ErrNotSupported
}
return s.Internal.StateNetworkName(p0)
}
func (s *GatewayStub) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
return *new(dtypes.NetworkName), ErrNotSupported
}
func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) {
if s.Internal.StateNetworkVersion == nil {
return *new(apitypes.NetworkVersion), ErrNotSupported

View File

@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/modules/dtypes"
) )
// MODIFYING THE API INTERFACE // MODIFYING THE API INTERFACE
@ -44,12 +45,15 @@ type Gateway interface {
ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error)
MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error)
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error)
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
@ -59,6 +63,7 @@ type Gateway interface {
StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (api.MinerInfo, error)
StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
StateNetworkName(context.Context) (dtypes.NetworkName, error)
StateNetworkVersion(context.Context, types.TipSetKey) (abinetwork.Version, error)
StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error)
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)

View File

@ -451,6 +451,8 @@ type GatewayMethods struct {
GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``
MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) ``
MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) ``
MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) ``
@ -461,8 +463,12 @@ type GatewayMethods struct {
StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) ``
StateCall func(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) ``
StateDealProviderCollateralBounds func(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) ``
StateDecodeParams func(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) ``
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) ``
StateGetReceipt func(p0 context.Context, p1 cid.Cid, p2 types.TipSetKey) (*types.MessageReceipt, error) ``
@ -481,6 +487,8 @@ type GatewayMethods struct {
StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) ``
StateNetworkName func(p0 context.Context) (dtypes.NetworkName, error) ``
StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) ``
StateSearchMsg func(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) ``
@ -2677,6 +2685,17 @@ func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Messag
return nil, ErrNotSupported
}
func (s *GatewayStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
if s.Internal.MpoolGetNonce == nil {
return 0, ErrNotSupported
}
return s.Internal.MpoolGetNonce(p0, p1)
}
func (s *GatewayStub) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) {
return 0, ErrNotSupported
}
func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) {
if s.Internal.MpoolPush == nil {
return *new(cid.Cid), ErrNotSupported
@ -2732,6 +2751,17 @@ func (s *GatewayStub) StateAccountKey(p0 context.Context, p1 address.Address, p2
return *new(address.Address), ErrNotSupported
}
func (s *GatewayStruct) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) {
if s.Internal.StateCall == nil {
return nil, ErrNotSupported
}
return s.Internal.StateCall(p0, p1, p2)
}
func (s *GatewayStub) StateCall(p0 context.Context, p1 *types.Message, p2 types.TipSetKey) (*api.InvocResult, error) {
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateDealProviderCollateralBounds(p0 context.Context, p1 abi.PaddedPieceSize, p2 bool, p3 types.TipSetKey) (api.DealCollateralBounds, error) {
if s.Internal.StateDealProviderCollateralBounds == nil {
return *new(api.DealCollateralBounds), ErrNotSupported
@ -2743,6 +2773,17 @@ func (s *GatewayStub) StateDealProviderCollateralBounds(p0 context.Context, p1 a
return *new(api.DealCollateralBounds), ErrNotSupported
}
func (s *GatewayStruct) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) {
if s.Internal.StateDecodeParams == nil {
return nil, ErrNotSupported
}
return s.Internal.StateDecodeParams(p0, p1, p2, p3, p4)
}
func (s *GatewayStub) StateDecodeParams(p0 context.Context, p1 address.Address, p2 abi.MethodNum, p3 []byte, p4 types.TipSetKey) (interface{}, error) {
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateGetActor(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) {
if s.Internal.StateGetActor == nil {
return nil, ErrNotSupported
@ -2842,6 +2883,17 @@ func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.A
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
if s.Internal.StateNetworkName == nil {
return *new(dtypes.NetworkName), ErrNotSupported
}
return s.Internal.StateNetworkName(p0)
}
func (s *GatewayStub) StateNetworkName(p0 context.Context) (dtypes.NetworkName, error) {
return *new(dtypes.NetworkName), ErrNotSupported
}
func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) {
if s.Internal.StateNetworkVersion == nil {
return *new(abinetwork.Version), ErrNotSupported

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -19,6 +19,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/proof"
"github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build"
@ -640,7 +641,42 @@ It will not send any messages to the chain.`,
if err != nil {
return err
}
jr, err := json.Marshal(res)
//convert sector information into easily readable information
type PoStPartition struct {
Index uint64
Skipped []uint64
}
type SubmitWindowedPoStParams struct {
Deadline uint64
Partitions []PoStPartition
Proofs []proof.PoStProof
ChainCommitEpoch abi.ChainEpoch
ChainCommitRand abi.Randomness
}
var postParams []SubmitWindowedPoStParams
for _, i := range res {
var postParam SubmitWindowedPoStParams
postParam.Deadline = i.Deadline
for id, part := range i.Partitions {
postParam.Partitions[id].Index = part.Index
count, err := part.Skipped.Count()
if err != nil {
return err
}
sectors, err := part.Skipped.All(count)
if err != nil {
return err
}
postParam.Partitions[id].Skipped = sectors
}
postParam.Proofs = i.Proofs
postParam.ChainCommitEpoch = i.ChainCommitEpoch
postParam.ChainCommitRand = i.ChainCommitRand
postParams = append(postParams, postParam)
}
jr, err := json.MarshalIndent(postParams, "", " ")
if err != nil {
return err
}

View File

@ -27,6 +27,7 @@ import (
_ "github.com/filecoin-project/lotus/lib/sigs/secp" _ "github.com/filecoin-project/lotus/lib/sigs/secp"
"github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/metrics"
"github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/impl/full"
"github.com/filecoin-project/lotus/node/modules/dtypes"
) )
const ( const (
@ -59,18 +60,22 @@ type TargetAPI interface {
ChainPutObj(context.Context, blocks.Block) error
ChainGetGenesis(context.Context) (*types.TipSet, error)
GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error)
MpoolPushUntrusted(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (api.MsigVesting, error)
MsigGetPending(ctx context.Context, addr address.Address, ts types.TipSetKey) ([]*api.MsigTransaction, error)
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error)
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error)
StateNetworkName(context.Context) (dtypes.NetworkName, error)
StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error)
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error)

View File

@ -20,6 +20,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/node/modules/dtypes"
) )
func (gw *Node) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) { func (gw *Node) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) {
@ -187,6 +188,13 @@ func (gw *Node) GasEstimateMessageGas(ctx context.Context, msg *types.Message, s
return gw.target.GasEstimateMessageGas(ctx, msg, spec, tsk)
}
func (gw *Node) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
return 0, err
}
return gw.target.MpoolGetNonce(ctx, addr)
}
func (gw *Node) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
return cid.Cid{}, err
@ -248,6 +256,16 @@ func (gw *Node) StateAccountKey(ctx context.Context, addr address.Address, tsk t
return gw.target.StateAccountKey(ctx, addr, tsk)
}
func (gw *Node) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
return nil, err
}
if err := gw.checkTipsetKey(ctx, tsk); err != nil {
return nil, err
}
return gw.target.StateCall(ctx, msg, tsk)
}
func (gw *Node) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
return api.DealCollateralBounds{}, err
@ -258,6 +276,16 @@ func (gw *Node) StateDealProviderCollateralBounds(ctx context.Context, size abi.
return gw.target.StateDealProviderCollateralBounds(ctx, size, verified, tsk)
}
func (gw *Node) StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
return nil, err
}
if err := gw.checkTipsetKey(ctx, tsk); err != nil {
return nil, err
}
return gw.target.StateDecodeParams(ctx, toAddr, method, params, tsk)
}
func (gw *Node) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
return nil, err
@ -308,6 +336,13 @@ func (gw *Node) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, t
return gw.target.StateMarketStorageDeal(ctx, dealId, tsk)
}
func (gw *Node) StateNetworkName(ctx context.Context) (dtypes.NetworkName, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
return *new(dtypes.NetworkName), err
}
return gw.target.StateNetworkName(ctx)
}
func (gw *Node) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
return network.VersionMax, err

View File

@ -3,18 +3,42 @@ package itests
import (
"context"
"encoding/json"
"sort"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/chain/types/ethtypes"
"github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/lotus/itests/kit"
"github.com/filecoin-project/lotus/lib/result" "github.com/filecoin-project/lotus/lib/result"
"github.com/filecoin-project/lotus/node/impl/full"
) )
// calculateExpectations calculates the expected number of items to be included in the response
// of eth_feeHistory. It takes care of null rounds by finding the closest tipset with height
// smaller than startHeight, and then looks back at requestAmount of items. It also considers
// scenarios where there are not enough items to look back.
func calculateExpectations(tsHeights []int, requestAmount, startHeight int) (count, oldestHeight int) {
latestIdx := sort.SearchInts(tsHeights, startHeight)
// SearchInts returns the index of the number that's larger than the target if the target
// doesn't exist. However, we're looking for the closest number that's smaller than the target
for tsHeights[latestIdx] > startHeight {
latestIdx--
}
cnt := requestAmount
oldestIdx := latestIdx - requestAmount + 1
if oldestIdx < 0 {
cnt = latestIdx + 1
oldestIdx = 0
}
return cnt, tsHeights[oldestIdx]
}
func TestEthFeeHistory(t *testing.T) {
require := require.New(t)
@ -22,70 +46,136 @@ func TestEthFeeHistory(t *testing.T) {
blockTime := 100 * time.Millisecond
client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
ens.InterconnectAll().BeginMining(blockTime)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
// Wait for the network to create 20 blocks
heads, err := client.ChainNotify(ctx)
require.NoError(err)
// Save the full view of the tipsets to calculate the answer when there are null rounds
tsHeights := []int{1}
go func() {
for chg := range heads {
for _, c := range chg {
tsHeights = append(tsHeights, int(c.Val.Height()))
}
}
}()
miner := ens.InterconnectAll().BeginMining(blockTime)
client.WaitTillChain(ctx, kit.HeightAtLeast(7))
miner[0].InjectNulls(abi.ChainEpoch(5))
// Wait for the network to create at least 20 tipsets
client.WaitTillChain(ctx, kit.HeightAtLeast(20))
for _, m := range miner {
m.Pause()
}
ch, err := client.ChainNotify(ctx)
require.NoError(err)
// Wait for 5 seconds of inactivity
func() {
for {
select {
case <-ch:
continue
case <-time.After(5 * time.Second):
return
}
}
}()
sort.Ints(tsHeights)
// because of the deferred execution, the last tipset is not executed yet,
// and the one before the last one is the last executed tipset,
// which corresponds to the "latest" tag in EthGetBlockByNumber
latestBlk := ethtypes.EthUint64(tsHeights[len(tsHeights)-2])
blk, err := client.EthGetBlockByNumber(ctx, "latest", false)
require.NoError(err)
require.Equal(blk.Number, latestBlk)
assertHistory := func(history *ethtypes.EthFeeHistory, requestAmount, startHeight int) {
amount, oldest := calculateExpectations(tsHeights, requestAmount, startHeight)
require.Equal(amount+1, len(history.BaseFeePerGas))
require.Equal(amount, len(history.GasUsedRatio))
require.Equal(ethtypes.EthUint64(oldest), history.OldestBlock)
}
history, err := client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
json.Marshal([]interface{}{5, "0x10"}),
).Assert(require.NoError))
require.NoError(err)
require.Equal(6, len(history.BaseFeePerGas))
assertHistory(&history, 5, 16)
require.Equal(5, len(history.GasUsedRatio))
require.Equal(ethtypes.EthUint64(16-5+1), history.OldestBlock)
require.Nil(history.Reward)
history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
json.Marshal([]interface{}{"5", "0x10"}),
).Assert(require.NoError))
require.NoError(err)
require.Equal(6, len(history.BaseFeePerGas))
assertHistory(&history, 5, 16)
require.Equal(5, len(history.GasUsedRatio))
require.Nil(history.Reward)
require.Equal(ethtypes.EthUint64(16-5+1), history.OldestBlock)
history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
json.Marshal([]interface{}{5, "latest"}),
).Assert(require.NoError))
require.NoError(err)
assertHistory(&history, 5, int(latestBlk))
require.Nil(history.Reward) require.Nil(history.Reward)
history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
json.Marshal([]interface{}{"0x10", "0x12"}),
).Assert(require.NoError))
require.NoError(err)
require.Equal(17, len(history.BaseFeePerGas))
assertHistory(&history, 16, 18)
require.Equal(16, len(history.GasUsedRatio))
require.Equal(ethtypes.EthUint64(18-16+1), history.OldestBlock)
require.Nil(history.Reward)
history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
json.Marshal([]interface{}{5, "0x10"}),
).Assert(require.NoError))
require.NoError(err)
require.Equal(6, len(history.BaseFeePerGas))
assertHistory(&history, 5, 16)
require.Equal(5, len(history.GasUsedRatio))
require.Equal(ethtypes.EthUint64(16-5+1), history.OldestBlock)
require.Nil(history.Reward)
history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
json.Marshal([]interface{}{5, "10"}),
).Assert(require.NoError))
require.NoError(err)
require.Equal(6, len(history.BaseFeePerGas))
assertHistory(&history, 5, 10)
require.Equal(5, len(history.GasUsedRatio))
require.Nil(history.Reward)
require.Equal(ethtypes.EthUint64(10-5+1), history.OldestBlock)
// test when the requested number of blocks is longer than chain length
history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
json.Marshal([]interface{}{"0x30", "latest"}),
).Assert(require.NoError))
require.NoError(err)
assertHistory(&history, 48, int(latestBlk))
require.Nil(history.Reward)
// test when the requested number of blocks is longer than chain length
history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
json.Marshal([]interface{}{"0x30", "10"}),
).Assert(require.NoError))
require.NoError(err)
assertHistory(&history, 48, 10)
require.Nil(history.Reward)
history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
json.Marshal([]interface{}{5, "10", &[]float64{25, 50, 75}}),
).Assert(require.NoError))
require.NoError(err)
require.Equal(6, len(history.BaseFeePerGas))
assertHistory(&history, 5, 10)
require.Equal(5, len(history.GasUsedRatio))
require.Equal(ethtypes.EthUint64(10-5+1), history.OldestBlock)
require.NotNil(history.Reward)
require.Equal(5, len(*history.Reward))
for _, arr := range *history.Reward {
require.Equal(3, len(arr))
for _, item := range arr {
require.Equal(ethtypes.EthBigInt(types.NewInt(full.MinGasPremium)), item)
}
}
history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
@ -93,6 +183,11 @@ func TestEthFeeHistory(t *testing.T) {
).Assert(require.NoError))
require.Error(err)
history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
json.Marshal([]interface{}{5, "10", &[]float64{75, 50}}),
).Assert(require.NoError))
require.Error(err)
history, err = client.EthFeeHistory(ctx, result.Wrap[jsonrpc.RawParams](
json.Marshal([]interface{}{5, "10", &[]float64{}}),
).Assert(require.NoError))

View File

@ -689,11 +689,7 @@ func (a *EthModule) EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (eth
return ethtypes.EthFeeHistory{}, fmt.Errorf("bad block parameter %s: %s", params.NewestBlkNum, err)
}
// Deal with the case that the chain is shorter than the number of requested blocks.
oldestBlkHeight := uint64(1)
if abi.ChainEpoch(params.BlkCount) <= ts.Height() {
oldestBlkHeight = uint64(ts.Height()) - uint64(params.BlkCount) + 1
}
// NOTE: baseFeePerGas should include the next block after the newest of the returned range,
// because the next base fee can be inferred from the messages in the newest block.
@ -703,29 +699,32 @@ func (a *EthModule) EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (eth
gasUsedRatioArray := []float64{}
rewardsArray := make([][]ethtypes.EthBigInt, 0)
for ts.Height() >= abi.ChainEpoch(oldestBlkHeight) {
blocksIncluded := 0
// Unfortunately we need to rebuild the full message view so we can
for blocksIncluded < int(params.BlkCount) && ts.Height() > 0 {
// totalize gas used in the tipset.
compOutput, err := a.StateCompute(ctx, ts.Height(), nil, ts.Key())
msgs, err := a.Chain.MessagesForTipset(ctx, ts)
if err != nil {
return ethtypes.EthFeeHistory{}, xerrors.Errorf("error loading messages for tipset: %v: %w", ts, err)
return ethtypes.EthFeeHistory{}, xerrors.Errorf("cannot lookup the status of tipset: %v: %w", ts, err)
}
txGasRewards := gasRewardSorter{}
for txIdx, msg := range msgs {
for _, msg := range compOutput.Trace {
msgLookup, err := a.StateAPI.StateSearchMsg(ctx, types.EmptyTSK, msg.Cid(), api.LookbackNoLimit, false)
if msg.Msg.From == builtintypes.SystemActorAddr {
if err != nil || msgLookup == nil {
continue
return ethtypes.EthFeeHistory{}, nil
}
tx, err := newEthTxFromMessageLookup(ctx, msgLookup, txIdx, a.Chain, a.StateAPI)
smsgCid, err := getSignedMessage(ctx, a.Chain, msg.MsgCid)
if err != nil {
return ethtypes.EthFeeHistory{}, nil
return ethtypes.EthFeeHistory{}, xerrors.Errorf("failed to get signed msg %s: %w", msg.MsgCid, err)
}
tx, err := newEthTxFromSignedMessage(ctx, smsgCid, a.StateAPI)
if err != nil {
return ethtypes.EthFeeHistory{}, err
}
txGasRewards = append(txGasRewards, gasRewardTuple{
reward: tx.Reward(ts.Blocks()[0].ParentBaseFee),
gas: uint64(msgLookup.Receipt.GasUsed),
gas: uint64(msg.MsgRct.GasUsed),
})
}
@ -735,6 +734,8 @@ func (a *EthModule) EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (eth
baseFeeArray = append(baseFeeArray, ethtypes.EthBigInt(ts.Blocks()[0].ParentBaseFee))
gasUsedRatioArray = append(gasUsedRatioArray, float64(totalGasUsed)/float64(build.BlockGasLimit))
rewardsArray = append(rewardsArray, rewards)
oldestBlkHeight = uint64(ts.Height())
blocksIncluded++
parentTsKey := ts.Parents()
ts, err = a.Chain.LoadTipSet(ctx, parentTsKey)
@ -1804,12 +1805,16 @@ func newEthBlockFromFilecoinTipSet(ctx context.Context, ts *types.TipSet, fullTx
return ethtypes.EthBlock{}, xerrors.Errorf("failed to compute state: %w", err) return ethtypes.EthBlock{}, xerrors.Errorf("failed to compute state: %w", err)
} }
for txIdx, msg := range compOutput.Trace { txIdx := 0
for _, msg := range compOutput.Trace {
// skip system messages like reward application and cron
if msg.Msg.From == builtintypes.SystemActorAddr {
continue
}
ti := ethtypes.EthUint64(txIdx)
txIdx++
gasUsed += msg.MsgRct.GasUsed
smsgCid, err := getSignedMessage(ctx, cs, msg.MsgCid)
if err != nil {
@ -1820,8 +1825,6 @@ func newEthBlockFromFilecoinTipSet(ctx context.Context, ts *types.TipSet, fullTx
return ethtypes.EthBlock{}, xerrors.Errorf("failed to convert msg to ethTx: %w", err) return ethtypes.EthBlock{}, xerrors.Errorf("failed to convert msg to ethTx: %w", err)
} }
ti := ethtypes.EthUint64(txIdx)
tx.ChainID = ethtypes.EthUint64(build.Eip155ChainId)
tx.BlockHash = &blkHash
tx.BlockNumber = &bn
@ -2351,7 +2354,7 @@ func calculateRewardsAndGasUsed(rewardPercentiles []float64, txGasRewards gasRew
rewards := make([]ethtypes.EthBigInt, len(rewardPercentiles))
for i := range rewards {
rewards[i] = ethtypes.EthBigIntZero
rewards[i] = ethtypes.EthBigInt(types.NewInt(MinGasPremium))
}
if len(txGasRewards) == 0 {

View File

@ -135,7 +135,7 @@ func TestRewardPercentiles(t *testing.T) {
{
percentiles: []float64{25, 50, 75},
txGasRewards: []gasRewardTuple{},
answer: []int64{0, 0, 0},
answer: []int64{MinGasPremium, MinGasPremium, MinGasPremium},
},
{
percentiles: []float64{25, 50, 75, 100},

View File

@ -289,14 +289,20 @@ func (m *Manager) ServeHTTP(w http.ResponseWriter, r *http.Request) {
m.remoteHnd.ServeHTTP(w, r)
}
func schedNop(context.Context, Worker) error {
var schedNop = PrepareAction{
return nil
Action: func(ctx context.Context, w Worker) error {
return nil
},
PrepType: sealtasks.TTNoop,
}
func (m *Manager) schedFetch(sector storiface.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) func(context.Context, Worker) error {
func (m *Manager) schedFetch(sector storiface.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) PrepareAction {
return func(ctx context.Context, worker Worker) error {
return PrepareAction{
_, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, ft, ptype, am))
Action: func(ctx context.Context, worker Worker) error {
return err
_, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, ft, ptype, am))
return err
},
PrepType: sealtasks.TTFetch,
}
}
@ -315,16 +321,19 @@ func (m *Manager) SectorsUnsealPiece(ctx context.Context, sector storiface.Secto
// if the selected worker does NOT have the sealed files for the sector, instruct it to fetch it from a worker that has them and
// put it in the sealing scratch space.
sealFetch := func(ctx context.Context, worker Worker) error {
sealFetch := PrepareAction{
log.Debugf("copy sealed/cache sector data for sector %d", sector.ID)
Action: func(ctx context.Context, worker Worker) error {
_, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy))
log.Debugf("copy sealed/cache sector data for sector %d", sector.ID)
_, err2 := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing, storiface.AcquireCopy))
_, err := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy))
_, err2 := m.waitSimpleCall(ctx)(worker.Fetch(ctx, sector, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing, storiface.AcquireCopy))
if err != nil && err2 != nil {
return xerrors.Errorf("cannot unseal piece. error fetching sealed data: %w. error fetching replica data: %w", err, err2)
}
return nil
},
PrepType: sealtasks.TTFetch,
}
if unsealed == nil {

View File

@ -42,6 +42,10 @@ func WithPriority(ctx context.Context, priority int) context.Context {
const mib = 1 << 20
type WorkerAction func(ctx context.Context, w Worker) error
type PrepareAction struct {
Action WorkerAction
PrepType sealtasks.TaskType
}
type SchedWorker interface {
TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error)
@ -130,7 +134,7 @@ type WorkerRequest struct {
Sel WorkerSelector
SchedId uuid.UUID
prepare WorkerAction
prepare PrepareAction
work WorkerAction
start time.Time
@ -157,7 +161,15 @@ func newScheduler(ctx context.Context, assigner string) (*Scheduler, error) {
case "", "utilization": case "", "utilization":
a = NewLowestUtilizationAssigner() a = NewLowestUtilizationAssigner()
case "spread": case "spread":
a = NewSpreadAssigner() a = NewSpreadAssigner(false)
case "experiment-spread-qcount":
a = NewSpreadAssigner(true)
case "experiment-spread-tasks":
a = NewSpreadTasksAssigner(false)
case "experiment-spread-tasks-qcount":
a = NewSpreadTasksAssigner(true)
case "experiment-random":
a = NewRandomAssigner()
default:
return nil, xerrors.Errorf("unknown assigner '%s'", assigner)
}
@ -189,7 +201,7 @@ func newScheduler(ctx context.Context, assigner string) (*Scheduler, error) {
}, nil
}
func (sh *Scheduler) Schedule(ctx context.Context, sector storiface.SectorRef, taskType sealtasks.TaskType, sel WorkerSelector, prepare WorkerAction, work WorkerAction) error {
func (sh *Scheduler) Schedule(ctx context.Context, sector storiface.SectorRef, taskType sealtasks.TaskType, sel WorkerSelector, prepare PrepareAction, work WorkerAction) error {
ret := make(chan workerResponse)
select {
@ -239,6 +251,13 @@ func (r *WorkerRequest) SealTask() sealtasks.SealTaskType {
}
}
func (r *WorkerRequest) PrepSealTask() sealtasks.SealTaskType {
return sealtasks.SealTaskType{
TaskType: r.prepare.PrepType,
RegisteredSealProof: r.Sector.ProofType,
}
}
type SchedDiagRequestInfo struct {
Sector abi.SectorID
TaskType sealtasks.TaskType

View File

@ -58,7 +58,7 @@ func (a *AssignerCommon) TrySched(sh *Scheduler) {
windows := make([]SchedWindow, windowsLen)
for i := range windows {
windows[i].Allocated = *NewActiveResources()
windows[i].Allocated = *NewActiveResources(newTaskCounter())
}
acceptableWindows := make([][]int, queueLen) // QueueIndex -> []OpenWindowIndex

View File

@ -0,0 +1,88 @@
package sealer
import (
"math/rand"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
func NewRandomAssigner() Assigner {
return &AssignerCommon{
WindowSel: RandomWS,
}
}
func RandomWS(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int {
scheduled := 0
rmQueue := make([]int, 0, queueLen)
for sqi := 0; sqi < queueLen; sqi++ {
task := (*sh.SchedQueue)[sqi]
//bestAssigned := math.MaxInt // smaller = better
type choice struct {
selectedWindow int
needRes storiface.Resources
info storiface.WorkerInfo
bestWid storiface.WorkerID
}
choices := make([]choice, 0, len(acceptableWindows[task.IndexHeap]))
for i, wnd := range acceptableWindows[task.IndexHeap] {
wid := sh.OpenWindows[wnd].Worker
w := sh.Workers[wid]
res := w.Info.Resources.ResourceSpec(task.Sector.ProofType, task.TaskType)
log.Debugf("SCHED try assign sqi:%d sector %d to window %d (awi:%d)", sqi, task.Sector.ID.Number, wnd, i)
if !windows[wnd].Allocated.CanHandleRequest(task.SealTask(), res, wid, "schedAssign", w.Info) {
continue
}
choices = append(choices, choice{
selectedWindow: wnd,
needRes: res,
info: w.Info,
bestWid: wid,
})
}
if len(choices) == 0 {
// all windows full
continue
}
// choose randomly
randIndex := rand.Intn(len(choices))
selectedWindow := choices[randIndex].selectedWindow
needRes := choices[randIndex].needRes
info := choices[randIndex].info
bestWid := choices[randIndex].bestWid
log.Debugw("SCHED ASSIGNED",
"assigner", "darts",
"sqi", sqi,
"sector", task.Sector.ID.Number,
"task", task.TaskType,
"window", selectedWindow,
"worker", bestWid,
"choices", len(choices))
windows[selectedWindow].Allocated.Add(task.SealTask(), info.Resources, needRes)
windows[selectedWindow].Todo = append(windows[selectedWindow].Todo, task)
rmQueue = append(rmQueue, sqi)
scheduled++
}
if len(rmQueue) > 0 {
for i := len(rmQueue) - 1; i >= 0; i-- {
sh.SchedQueue.Remove(rmQueue[i])
}
}
return scheduled
}


@@ -6,76 +6,84 @@ import (
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

-func NewSpreadAssigner() Assigner {
+func NewSpreadAssigner(queued bool) Assigner {
return &AssignerCommon{
-WindowSel: SpreadWS,
+WindowSel: SpreadWS(queued),
}
}

-func SpreadWS(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int {
+func SpreadWS(queued bool) func(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int {
+return func(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int {
scheduled := 0
rmQueue := make([]int, 0, queueLen)
workerAssigned := map[storiface.WorkerID]int{}

for sqi := 0; sqi < queueLen; sqi++ {
task := (*sh.SchedQueue)[sqi]

selectedWindow := -1
var needRes storiface.Resources
var info storiface.WorkerInfo
var bestWid storiface.WorkerID
bestAssigned := math.MaxInt // smaller = better

for i, wnd := range acceptableWindows[task.IndexHeap] {
wid := sh.OpenWindows[wnd].Worker
w := sh.Workers[wid]

res := w.Info.Resources.ResourceSpec(task.Sector.ProofType, task.TaskType)

log.Debugf("SCHED try assign sqi:%d sector %d to window %d (awi:%d)", sqi, task.Sector.ID.Number, wnd, i)

if !windows[wnd].Allocated.CanHandleRequest(task.SealTask(), res, wid, "schedAssign", w.Info) {
continue
}

-wu, _ := workerAssigned[wid]
+wu, found := workerAssigned[wid]
+if !found && queued {
+wu = w.TaskCounts()
+workerAssigned[wid] = wu
+}
if wu >= bestAssigned {
continue
}

info = w.Info
needRes = res
bestWid = wid
selectedWindow = wnd
bestAssigned = wu
}

if selectedWindow < 0 {
// all windows full
continue
}

log.Debugw("SCHED ASSIGNED",
+"assigner", "spread",
+"spread-queued", queued,
"sqi", sqi,
"sector", task.Sector.ID.Number,
"task", task.TaskType,
"window", selectedWindow,
"worker", bestWid,
"assigned", bestAssigned)

workerAssigned[bestWid]++
windows[selectedWindow].Allocated.Add(task.SealTask(), info.Resources, needRes)
windows[selectedWindow].Todo = append(windows[selectedWindow].Todo, task)

rmQueue = append(rmQueue, sqi)
scheduled++
}

if len(rmQueue) > 0 {
for i := len(rmQueue) - 1; i >= 0; i-- {
sh.SchedQueue.Remove(rmQueue[i])
}
}

return scheduled
}
+}
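The only difference between the plain and queued spread variants is how the per-worker counters start: with queued=true they are seeded from w.TaskCounts(), i.e. work already assigned or queued on the worker. A small sketch under that reading; spreadAssigners is a hypothetical helper, not part of this diff:

// Illustrative only, not part of this commit: the two spread variants wired up
// side by side; only the queued flag differs.
func spreadAssigners() (plain, queueAware Assigner) {
	return NewSpreadAssigner(false), NewSpreadAssigner(true)
}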


@@ -0,0 +1,98 @@
package sealer
import (
"math"
"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
func NewSpreadTasksAssigner(queued bool) Assigner {
return &AssignerCommon{
WindowSel: SpreadTasksWS(queued),
}
}
type widTask struct {
wid storiface.WorkerID
tt sealtasks.TaskType
}
func SpreadTasksWS(queued bool) func(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int {
return func(sh *Scheduler, queueLen int, acceptableWindows [][]int, windows []SchedWindow) int {
scheduled := 0
rmQueue := make([]int, 0, queueLen)
workerAssigned := map[widTask]int{}
for sqi := 0; sqi < queueLen; sqi++ {
task := (*sh.SchedQueue)[sqi]
selectedWindow := -1
var needRes storiface.Resources
var info storiface.WorkerInfo
var bestWid widTask
bestAssigned := math.MaxInt // smaller = better
for i, wnd := range acceptableWindows[task.IndexHeap] {
wid := sh.OpenWindows[wnd].Worker
w := sh.Workers[wid]
res := w.Info.Resources.ResourceSpec(task.Sector.ProofType, task.TaskType)
log.Debugf("SCHED try assign sqi:%d sector %d to window %d (awi:%d)", sqi, task.Sector.ID.Number, wnd, i)
if !windows[wnd].Allocated.CanHandleRequest(task.SealTask(), res, wid, "schedAssign", w.Info) {
continue
}
wt := widTask{wid: wid, tt: task.TaskType}
wu, found := workerAssigned[wt]
if !found && queued {
st := task.SealTask()
wu = w.TaskCount(&st)
workerAssigned[wt] = wu
}
if wu >= bestAssigned {
continue
}
info = w.Info
needRes = res
bestWid = wt
selectedWindow = wnd
bestAssigned = wu
}
if selectedWindow < 0 {
// all windows full
continue
}
log.Debugw("SCHED ASSIGNED",
"assigner", "spread-tasks",
"spread-queued", queued,
"sqi", sqi,
"sector", task.Sector.ID.Number,
"task", task.TaskType,
"window", selectedWindow,
"worker", bestWid,
"assigned", bestAssigned)
workerAssigned[bestWid]++
windows[selectedWindow].Allocated.Add(task.SealTask(), info.Resources, needRes)
windows[selectedWindow].Todo = append(windows[selectedWindow].Todo, task)
rmQueue = append(rmQueue, sqi)
scheduled++
}
if len(rmQueue) > 0 {
for i := len(rmQueue) - 1; i >= 0; i-- {
sh.SchedQueue.Remove(rmQueue[i])
}
}
return scheduled
}
}


@@ -74,6 +74,7 @@ func LowestUtilizationWS(sh *Scheduler, queueLen int, acceptableWindows [][]int,
}

log.Debugw("SCHED ASSIGNED",
+"assigner", "util",
"sqi", sqi,
"sector", task.Sector.ID.Number,
"task", task.TaskType,


@@ -13,18 +13,68 @@ type ActiveResources struct {
gpuUsed float64
cpuUse uint64

-taskCounters map[sealtasks.SealTaskType]int
+taskCounters *taskCounter

cond *sync.Cond
waiting int
}

-func NewActiveResources() *ActiveResources {
-return &ActiveResources{
+type taskCounter struct {
+taskCounters map[sealtasks.SealTaskType]int
+
+// this lock is technically redundant, as ActiveResources is always accessed
+// with the worker lock, but let's not panic if we ever change that
+lk sync.Mutex
+}
+
+func newTaskCounter() *taskCounter {
+return &taskCounter{
taskCounters: map[sealtasks.SealTaskType]int{},
}
}

+func (tc *taskCounter) Add(tt sealtasks.SealTaskType) {
+tc.lk.Lock()
+defer tc.lk.Unlock()
+tc.taskCounters[tt]++
+}
+
+func (tc *taskCounter) Free(tt sealtasks.SealTaskType) {
+tc.lk.Lock()
+defer tc.lk.Unlock()
+tc.taskCounters[tt]--
+}
+
+func (tc *taskCounter) Get(tt sealtasks.SealTaskType) int {
+tc.lk.Lock()
+defer tc.lk.Unlock()
+return tc.taskCounters[tt]
+}
+
+func (tc *taskCounter) Sum() int {
+tc.lk.Lock()
+defer tc.lk.Unlock()
+sum := 0
+for _, v := range tc.taskCounters {
+sum += v
+}
+return sum
+}
+
+func (tc *taskCounter) ForEach(cb func(tt sealtasks.SealTaskType, count int)) {
+tc.lk.Lock()
+defer tc.lk.Unlock()
+for tt, count := range tc.taskCounters {
+cb(tt, count)
+}
+}
+
+func NewActiveResources(tc *taskCounter) *ActiveResources {
+return &ActiveResources{
+taskCounters: tc,
+}
+}
+
func (a *ActiveResources) withResources(id storiface.WorkerID, wr storiface.WorkerInfo, tt sealtasks.SealTaskType, r storiface.Resources, locker sync.Locker, cb func() error) error {
for !a.CanHandleRequest(tt, r, id, "withResources", wr) {
if a.cond == nil {
@@ -59,7 +109,7 @@ func (a *ActiveResources) Add(tt sealtasks.SealTaskType, wr storiface.WorkerReso
a.cpuUse += r.Threads(wr.CPUs, len(wr.GPUs))
a.memUsedMin += r.MinMemory
a.memUsedMax += r.MaxMemory
-a.taskCounters[tt]++
+a.taskCounters.Add(tt)

return a.utilization(wr) - startUtil
}
@@ -71,7 +121,7 @@ func (a *ActiveResources) Free(tt sealtasks.SealTaskType, wr storiface.WorkerRes
a.cpuUse -= r.Threads(wr.CPUs, len(wr.GPUs))
a.memUsedMin -= r.MinMemory
a.memUsedMax -= r.MaxMemory
-a.taskCounters[tt]--
+a.taskCounters.Free(tt)

if a.cond != nil {
a.cond.Broadcast()
@@ -82,8 +132,8 @@ func (a *ActiveResources) Free(tt sealtasks.SealTaskType, wr storiface.WorkerRes
// handle the request.
func (a *ActiveResources) CanHandleRequest(tt sealtasks.SealTaskType, needRes storiface.Resources, wid storiface.WorkerID, caller string, info storiface.WorkerInfo) bool {
if needRes.MaxConcurrent > 0 {
-if a.taskCounters[tt] >= needRes.MaxConcurrent {
-log.Debugf("sched: not scheduling on worker %s for %s; at task limit tt=%s, curcount=%d", wid, caller, tt, a.taskCounters[tt])
+if a.taskCounters.Get(tt) >= needRes.MaxConcurrent {
+log.Debugf("sched: not scheduling on worker %s for %s; at task limit tt=%s, curcount=%d", wid, caller, tt, a.taskCounters.Get(tt))
return false
}
}
@@ -170,6 +220,15 @@ func (a *ActiveResources) utilization(wr storiface.WorkerResources) float64 { //
return max
}

+func (a *ActiveResources) taskCount(tt *sealtasks.SealTaskType) int {
+// nil means all tasks
+if tt == nil {
+return a.taskCounters.Sum()
+}
+
+return a.taskCounters.Get(*tt)
+}
+
func (wh *WorkerHandle) Utilization() float64 {
wh.lk.Lock()
u := wh.active.utilization(wh.Info.Resources)
@@ -183,3 +242,31 @@ func (wh *WorkerHandle) Utilization() float64 {

return u
}
+
+func (wh *WorkerHandle) TaskCounts() int {
+wh.lk.Lock()
+u := wh.active.taskCount(nil)
+u += wh.preparing.taskCount(nil)
+wh.lk.Unlock()
+wh.wndLk.Lock()
+for _, window := range wh.activeWindows {
+u += window.Allocated.taskCount(nil)
+}
+wh.wndLk.Unlock()
+return u
+}
+
+func (wh *WorkerHandle) TaskCount(tt *sealtasks.SealTaskType) int {
+wh.lk.Lock()
+u := wh.active.taskCount(tt)
+u += wh.preparing.taskCount(tt)
+wh.lk.Unlock()
+wh.wndLk.Lock()
+for _, window := range wh.activeWindows {
+u += window.Allocated.taskCount(tt)
+}
+wh.wndLk.Unlock()
+return u
+}
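The counter is designed to be shared between the preparing and active trackers of a single worker, so a task type's MaxConcurrent limit spans both phases (newWorkerHandle below wires it up exactly this way). A minimal sketch under that assumption; exampleCounters is a hypothetical helper, not part of this diff:

// Illustrative only, not part of this commit: one counter shared by both
// resource trackers, so Add/Free/Get observe a single per-task-type count.
func exampleCounters() (preparing, active *ActiveResources) {
	tc := newTaskCounter()
	return NewActiveResources(tc), NewActiveResources(tc)
}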


@@ -288,25 +288,30 @@ func TestSched(t *testing.T) {
ProofType: spt,
}

-err := sched.Schedule(ctx, sectorRef, taskType, sel, func(ctx context.Context, w Worker) error {
-wi, err := w.Info(ctx)
-require.NoError(t, err)
+prep := PrepareAction{
+Action: func(ctx context.Context, w Worker) error {
+wi, err := w.Info(ctx)
+require.NoError(t, err)

require.Equal(t, expectWorker, wi.Hostname)

log.Info("IN ", taskName)

for {
_, ok := <-done
if !ok {
break
}
}

log.Info("OUT ", taskName)

return nil
-}, noopAction)
+},
+PrepType: taskType,
+}
+
+err := sched.Schedule(ctx, sectorRef, taskType, sel, prep, noopAction)
if err != context.Canceled {
require.NoError(t, err, fmt.Sprint(l, l2))
}
@@ -639,8 +644,8 @@ func BenchmarkTrySched(b *testing.B) {
Resources: decentWorkerResources,
},
Enabled: true,
-preparing: NewActiveResources(),
-active: NewActiveResources(),
+preparing: NewActiveResources(newTaskCounter()),
+active: NewActiveResources(newTaskCounter()),
}

for i := 0; i < windows; i++ {
@@ -685,7 +690,7 @@ func TestWindowCompact(t *testing.T) {
for _, windowTasks := range start {
window := &SchedWindow{
-Allocated: *NewActiveResources(),
+Allocated: *NewActiveResources(newTaskCounter()),
}

for _, task := range windowTasks {
@@ -708,7 +713,7 @@ func TestWindowCompact(t *testing.T) {
require.Equal(t, len(start)-len(expect), -sw.windowsRequested)

for wi, tasks := range expect {
-expectRes := NewActiveResources()
+expectRes := NewActiveResources(newTaskCounter())

for ti, task := range tasks {
require.Equal(t, task, wh.activeWindows[wi].Todo[ti].TaskType, "%d, %d", wi, ti)


@@ -30,12 +30,14 @@ func newWorkerHandle(ctx context.Context, w Worker) (*WorkerHandle, error) {
return nil, xerrors.Errorf("getting worker info: %w", err)
}

+tc := newTaskCounter()
+
worker := &WorkerHandle{
workerRpc: w,
Info: info,

-preparing: NewActiveResources(),
-active: NewActiveResources(),
+preparing: NewActiveResources(tc),
+active: NewActiveResources(tc),

Enabled: true,

closingMgr: make(chan struct{}),
@@ -352,8 +354,8 @@ assignLoop:
worker.lk.Lock()
for t, todo := range firstWindow.Todo {
-needRes := worker.Info.Resources.ResourceSpec(todo.Sector.ProofType, todo.TaskType)
-if worker.preparing.CanHandleRequest(todo.SealTask(), needRes, sw.wid, "startPreparing", worker.Info) {
+needResPrep := worker.Info.Resources.PrepResourceSpec(todo.Sector.ProofType, todo.TaskType, todo.prepare.PrepType)
+if worker.preparing.CanHandleRequest(todo.PrepSealTask(), needResPrep, sw.wid, "startPreparing", worker.Info) {
tidx = t
break
}
@@ -452,20 +454,21 @@ func (sw *schedWorker) startProcessingTask(req *WorkerRequest) error {
w, sh := sw.worker, sw.sched

needRes := w.Info.Resources.ResourceSpec(req.Sector.ProofType, req.TaskType)
+needResPrep := w.Info.Resources.PrepResourceSpec(req.Sector.ProofType, req.TaskType, req.prepare.PrepType)

w.lk.Lock()
-w.preparing.Add(req.SealTask(), w.Info.Resources, needRes)
+w.preparing.Add(req.PrepSealTask(), w.Info.Resources, needResPrep)
w.lk.Unlock()

go func() {
// first run the prepare step (e.g. fetching sector data from other worker)
tw := sh.workTracker.worker(sw.wid, w.Info, w.workerRpc)
tw.start()
-err := req.prepare(req.Ctx, tw)
+err := req.prepare.Action(req.Ctx, tw)

w.lk.Lock()

if err != nil {
-w.preparing.Free(req.SealTask(), w.Info.Resources, needRes)
+w.preparing.Free(req.PrepSealTask(), w.Info.Resources, needResPrep)
w.lk.Unlock()

select {
@@ -495,7 +498,7 @@ func (sw *schedWorker) startProcessingTask(req *WorkerRequest) error {
// wait (if needed) for resources in the 'active' window
err = w.active.withResources(sw.wid, w.Info, req.SealTask(), needRes, &w.lk, func() error {
-w.preparing.Free(req.SealTask(), w.Info.Resources, needRes)
+w.preparing.Free(req.PrepSealTask(), w.Info.Resources, needResPrep)
w.lk.Unlock()
defer w.lk.Lock() // we MUST return locked from this function
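With the prepare step now carried as a PrepareAction, its concurrency is accounted under PrepType rather than the main task type (via PrepSealTask and PrepResourceSpec above). A hedged sketch of building one; examplePrepare is hypothetical, and the choice of sealtasks.TTFetch as the prep type is an assumption, not taken from this diff:

// Illustrative only, not part of this commit: a prepare action whose
// concurrency is tracked as a fetch task rather than as the main seal task.
func examplePrepare() PrepareAction {
	return PrepareAction{
		Action: func(ctx context.Context, w Worker) error {
			// e.g. move sector data onto the assigned worker before the main task runs
			return nil
		},
		PrepType: sealtasks.TTFetch,
	}
}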


@@ -36,6 +36,8 @@ const (
TTGenerateWindowPoSt TaskType = "post/v0/windowproof"
TTGenerateWinningPoSt TaskType = "post/v0/winningproof"
+
+TTNoop TaskType = ""
)

var order = map[TaskType]int{


@@ -43,9 +43,9 @@ func (m *Manager) WorkerStats(ctx context.Context) map[uuid.UUID]storiface.Worke
TaskCounts: map[string]int{},
}

-for tt, count := range handle.active.taskCounters {
+handle.active.taskCounters.ForEach(func(tt sealtasks.SealTaskType, count int) {
out[uuid.UUID(id)].TaskCounts[tt.String()] = count
-}
+})

handle.lk.Unlock()
}


@@ -65,6 +65,20 @@ func (wr WorkerResources) ResourceSpec(spt abi.RegisteredSealProof, tt sealtasks
return res
}

+// PrepResourceSpec is like ResourceSpec, but meant for use limiting parallel preparing
+// tasks.
+func (wr WorkerResources) PrepResourceSpec(spt abi.RegisteredSealProof, tt, prepTT sealtasks.TaskType) Resources {
+res := wr.ResourceSpec(spt, tt)
+
+if prepTT != tt && prepTT != sealtasks.TTNoop {
+prepRes := wr.ResourceSpec(spt, prepTT)
+res.MaxConcurrent = prepRes.MaxConcurrent
+}
+
+// otherwise, use the default resource table
+return res
+}
+
type WorkerStats struct {
Info WorkerInfo
Tasks []sealtasks.TaskType
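A hedged sketch of calling PrepResourceSpec: the main task keeps its CPU/GPU/memory spec, and only MaxConcurrent is taken from the prep task when it differs and is not TTNoop. examplePrepSpec and the TTAddPiece/TTFetch pairing are illustrative assumptions, not taken from this diff:

// Illustrative only, not part of this commit: resources for an AddPiece task
// whose prepare step is limited like a fetch task; only MaxConcurrent changes.
func examplePrepSpec(wr WorkerResources, spt abi.RegisteredSealProof) Resources {
	return wr.PrepResourceSpec(spt, sealtasks.TTAddPiece, sealtasks.TTFetch)
}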