Merge remote-tracking branch 'origin/master' into feat/new-gw-methods

Łukasz Magiera 2023-03-23 11:14:22 +01:00
commit 4ebc408a5a
142 changed files with 4519 additions and 2818 deletions

View File

@ -7,12 +7,12 @@ executors:
golang:
docker:
# Must match GO_VERSION_MIN in project root
- image: cimg/go:1.18.8
- image: cimg/go:1.19.7
resource_class: medium+
golang-2xl:
docker:
# Must match GO_VERSION_MIN in project root
- image: cimg/go:1.18.8
- image: cimg/go:1.19.7
resource_class: 2xlarge
ubuntu:
docker:
@ -818,6 +818,12 @@ workflows:
- build
suite: itest-mpool_push_with_uuid
target: "./itests/mpool_push_with_uuid_test.go"
- test:
name: test-itest-msgindex
requires:
- build
suite: itest-msgindex
target: "./itests/msgindex_test.go"
- test:
name: test-itest-multisig
requires:

View File

@ -7,12 +7,12 @@ executors:
golang:
docker:
# Must match GO_VERSION_MIN in project root
- image: cimg/go:1.18.8
- image: cimg/go:1.19.7
resource_class: medium+
golang-2xl:
docker:
# Must match GO_VERSION_MIN in project root
- image: cimg/go:1.18.8
- image: cimg/go:1.19.7
resource_class: 2xlarge
ubuntu:
docker:

View File

@ -154,6 +154,48 @@ New <code>ExecutionTrace</code>:
</details>
# v1.20.3 / 2023-03-09
A 🐈 stepped on the ⌨️ and made a mistake while resolving conflicts 😨. This release only includes #10439 to fix that mistake. v1.20.2 is retracted; please skip v1.20.2 and **only** update to v1.20.3!!!
## Changelog
> compare to v1.20.1
This is a HIGHLY RECOMMENDED patch release for node operators/API service providers that run an ETH RPC service, and an optional release for Storage Providers.
## Bug fixes
- fix: EthAPI: use StateCompute for feeHistory; apply minimum gas premium #10413
- fix: eth API: return correct txIdx around null blocks #10419
- fix: Eth API: make block parameter parsing sounder. #10427
## Improvement
- feat: Lotus Gateway: Add missing methods - master #10420
- feat: mempool: Reduce minimum replace fee from 1.25x to 1.1x #10416
- We recommend storage providers update their nodes to this patch; it will improve the experience of developers who use Ethereum tooling.
# v1.20.2 / 2023-03-09
This is a HIGHLY RECOMMENDED patch release for node operators/API service providers that run an ETH RPC service, and an optional release for Storage Providers.
## Bug fixes
- fix: EthAPI: use StateCompute for feeHistory; apply minimum gas premium #10413
- fix: eth API: return correct txIdx around null blocks #10419
- fix: Eth API: make block parameter parsing sounder. #10427
## Improvement
- feat: Lotus Gateway: Add missing methods - master #10420
- feat: mempool: Reduce minimum replace fee from 1.25x to 1.1x #10416
- We recommend storage providers update their nodes to this patch; it will improve the experience of developers who use Ethereum tooling.
# v1.20.1 / 2023-03-06
This is an optional patch release for node operators/API service providers that run an ETH RPC service.
## Bug fixes
- fix: EthAPI: Correctly get parent hash #10389
- fix: EthAPI: Make newEthBlockFromFilecoinTipSet faster and correct #10380
- fix: state: short-circuit genesis state computation
# 1.20.0 / 2023-02-28
This is a MANDATORY release of Lotus that delivers the [Hygge network upgrade](https://github.com/filecoin-project/community/discussions/74?sort=top#discussioncomment-4313888), introducing Filecoin network version 18. The centerpiece of the upgrade is the introduction of [Filecoin Virtual Machine (FVM) Milestone 2.1](https://fvm.filecoin.io/), which allows EVM-compatible contracts to be deployed on the Filecoin network. This upgrade delivers user-programmability to the Filecoin network for the first time!

View File

@ -1 +1 @@
1.18.8
1.19.7

View File

@ -13,7 +13,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-jsonrpc"
@ -797,10 +797,12 @@ type FullNode interface {
EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthHash, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read
EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read
EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error) //perm:read
EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error) //perm:read
EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error) //perm:read
EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error) //perm:read
EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkOpt string) (ethtypes.EthUint64, error) //perm:read
EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error) //perm:read
EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error) //perm:read
EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read
EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read

View File

@ -73,6 +73,7 @@ type Gateway interface {
StateNetworkName(context.Context) (dtypes.NetworkName, error)
StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error)
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
@ -87,12 +88,12 @@ type Gateway interface {
EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthHash, fullTxInfo bool) (ethtypes.EthBlock, error)
EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxInfo bool) (ethtypes.EthBlock, error)
EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error)
EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error)
EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error)
EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error)
EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkOpt string) (ethtypes.EthUint64, error)
EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error)
EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error)
EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error)
EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error)
EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkOpt string) (ethtypes.EthBytes, error)
EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam string) (ethtypes.EthBytes, error)
EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam string) (ethtypes.EthBigInt, error)
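The hunk above (together with the matching FullNode additions earlier in this diff) exposes the limited-lookback Eth lookups and StateVerifierStatus through the Gateway. Below is a minimal sketch of how a client might call the new methods, assuming a connected api.Gateway client; the function name, the 2880-epoch lookback value, and the ethtypes import path are illustrative assumptions, not part of this change.

```go
// Illustrative sketch only (not part of this diff): calling the Gateway
// methods added above. It assumes the caller already has a connected
// api.Gateway client `gw`, a transaction hash `txHash`, and an address
// `addr`; the lookback value is a hypothetical example.
package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/types/ethtypes"
)

// lookupViaGateway exercises the limited-lookback Eth transaction lookup and
// the new StateVerifierStatus method on the Gateway interface.
func lookupViaGateway(ctx context.Context, gw api.Gateway, txHash ethtypes.EthHash, addr address.Address) error {
	// Bound how far back the lookup may search (hypothetical value).
	const lookbackLimit = abi.ChainEpoch(2880)

	tx, err := gw.EthGetTransactionByHashLimited(ctx, &txHash, lookbackLimit)
	if err != nil {
		return fmt.Errorf("looking up eth tx: %w", err)
	}
	if tx != nil {
		fmt.Println("found tx from:", tx.From)
	}

	// StateVerifierStatus returns the verifier's remaining datacap, or nil
	// if the address is not a verifier.
	power, err := gw.StateVerifierStatus(ctx, addr, types.EmptyTSK)
	if err != nil {
		return fmt.Errorf("querying verifier status: %w", err)
	}
	if power != nil {
		fmt.Println("verifier datacap:", power)
	}
	return nil
}
```

The Limited variants mirror the ones added to the FullNode API above (tagged //perm:read), so the same call shape works against a full node or through the gateway.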

View File

@ -11,7 +11,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/piecestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
@ -212,9 +212,11 @@ type StorageMiner interface {
StorageDetachLocal(ctx context.Context, path string) error //perm:admin
StorageRedeclareLocal(ctx context.Context, id *storiface.ID, dropMissing bool) error //perm:admin
MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write
MarketListDeals(ctx context.Context) ([]*MarketDeal, error) //perm:read
MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) //perm:read
MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write
MarketListDeals(ctx context.Context) ([]*MarketDeal, error) //perm:read
// MarketListRetrievalDeals is deprecated, returns empty list
MarketListRetrievalDeals(ctx context.Context) ([]struct{}, error) //perm:read
MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) //perm:read
MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) //perm:read
MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error //perm:admin

View File

@ -50,22 +50,6 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
return err
}
// t.WaitSentinel (cid.Cid) (struct)
if len("WaitSentinel") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"WaitSentinel\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WaitSentinel"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("WaitSentinel")); err != nil {
return err
}
if err := cbg.WriteCid(cw, t.WaitSentinel); err != nil {
return xerrors.Errorf("failed to write cid field t.WaitSentinel: %w", err)
}
// t.Vouchers ([]*paych.SignedVoucher) (slice)
if len("Vouchers") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Vouchers\" was too long")
@ -90,6 +74,23 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
return err
}
}
// t.WaitSentinel (cid.Cid) (struct)
if len("WaitSentinel") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"WaitSentinel\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("WaitSentinel"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("WaitSentinel")); err != nil {
return err
}
if err := cbg.WriteCid(cw, t.WaitSentinel); err != nil {
return xerrors.Errorf("failed to write cid field t.WaitSentinel: %w", err)
}
return nil
}
@ -140,19 +141,6 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) {
return xerrors.Errorf("unmarshaling t.Channel: %w", err)
}
}
// t.WaitSentinel (cid.Cid) (struct)
case "WaitSentinel":
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.WaitSentinel: %w", err)
}
t.WaitSentinel = c
}
// t.Vouchers ([]*paych.SignedVoucher) (slice)
case "Vouchers":
@ -184,6 +172,20 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) {
t.Vouchers[i] = &v
}
// t.WaitSentinel (cid.Cid) (struct)
case "WaitSentinel":
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.WaitSentinel: %w", err)
}
t.WaitSentinel = c
}
default:
// Field doesn't exist on this type, so ignore it
cbg.ScanForLinks(r, func(cid.Cid) {})
@ -204,19 +206,19 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
return err
}
// t.SectorID (abi.SectorNumber) (uint64)
if len("SectorID") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"SectorID\" was too long")
// t.Size (abi.UnpaddedPieceSize) (uint64)
if len("Size") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Size\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorID"))); err != nil {
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Size"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("SectorID")); err != nil {
if _, err := io.WriteString(w, string("Size")); err != nil {
return err
}
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil {
return err
}
@ -236,19 +238,19 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
return err
}
// t.Size (abi.UnpaddedPieceSize) (uint64)
if len("Size") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Size\" was too long")
// t.SectorID (abi.SectorNumber) (uint64)
if len("SectorID") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"SectorID\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Size"))); err != nil {
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("SectorID"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("Size")); err != nil {
if _, err := io.WriteString(w, string("SectorID")); err != nil {
return err
}
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil {
return err
}
@ -293,8 +295,8 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) {
}
switch name {
// t.SectorID (abi.SectorNumber) (uint64)
case "SectorID":
// t.Size (abi.UnpaddedPieceSize) (uint64)
case "Size":
{
@ -305,7 +307,7 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) {
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.SectorID = abi.SectorNumber(extra)
t.Size = abi.UnpaddedPieceSize(extra)
}
// t.Offset (abi.PaddedPieceSize) (uint64)
@ -323,8 +325,8 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) {
t.Offset = abi.PaddedPieceSize(extra)
}
// t.Size (abi.UnpaddedPieceSize) (uint64)
case "Size":
// t.SectorID (abi.SectorNumber) (uint64)
case "SectorID":
{
@ -335,7 +337,7 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) {
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Size = abi.UnpaddedPieceSize(extra)
t.SectorID = abi.SectorNumber(extra)
}
@ -474,6 +476,28 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
return err
}
// t.Epoch (abi.ChainEpoch) (int64)
if len("Epoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Epoch\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("Epoch")); err != nil {
return err
}
if t.Epoch >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
return err
}
}
// t.Value (abi.SealRandomness) (slice)
if len("Value") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Value\" was too long")
@ -497,28 +521,6 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error {
if _, err := cw.Write(t.Value[:]); err != nil {
return err
}
// t.Epoch (abi.ChainEpoch) (int64)
if len("Epoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Epoch\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("Epoch")); err != nil {
return err
}
if t.Epoch >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
return err
}
}
return nil
}
@ -560,7 +562,33 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) {
}
switch name {
// t.Value (abi.SealRandomness) (slice)
// t.Epoch (abi.ChainEpoch) (int64)
case "Epoch":
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.Epoch = abi.ChainEpoch(extraI)
}
// t.Value (abi.SealRandomness) (slice)
case "Value":
maj, extra, err = cr.ReadHeader()
@ -582,32 +610,6 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) {
if _, err := io.ReadFull(cr, t.Value[:]); err != nil {
return err
}
// t.Epoch (abi.ChainEpoch) (int64)
case "Epoch":
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.Epoch = abi.ChainEpoch(extraI)
}
default:
// Field doesn't exist on this type, so ignore it
@ -629,6 +631,28 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
return err
}
// t.Epoch (abi.ChainEpoch) (int64)
if len("Epoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Epoch\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("Epoch")); err != nil {
return err
}
if t.Epoch >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
return err
}
}
// t.Value (abi.InteractiveSealRandomness) (slice)
if len("Value") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Value\" was too long")
@ -652,28 +676,6 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error {
if _, err := cw.Write(t.Value[:]); err != nil {
return err
}
// t.Epoch (abi.ChainEpoch) (int64)
if len("Epoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"Epoch\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Epoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("Epoch")); err != nil {
return err
}
if t.Epoch >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
return err
}
}
return nil
}
@ -715,7 +717,33 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) {
}
switch name {
// t.Value (abi.InteractiveSealRandomness) (slice)
// t.Epoch (abi.ChainEpoch) (int64)
case "Epoch":
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.Epoch = abi.ChainEpoch(extraI)
}
// t.Value (abi.InteractiveSealRandomness) (slice)
case "Value":
maj, extra, err = cr.ReadHeader()
@ -737,32 +765,6 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) {
if _, err := io.ReadFull(cr, t.Value[:]); err != nil {
return err
}
// t.Epoch (abi.ChainEpoch) (int64)
case "Epoch":
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.Epoch = abi.ChainEpoch(extraI)
}
default:
// Field doesn't exist on this type, so ignore it
@ -784,6 +786,22 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
return err
}
// t.DealID (abi.DealID) (uint64)
if len("DealID") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealID\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("DealID")); err != nil {
return err
}
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil {
return err
}
// t.PublishCid (cid.Cid) (struct)
if len("PublishCid") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"PublishCid\" was too long")
@ -806,22 +824,6 @@ func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
}
}
// t.DealID (abi.DealID) (uint64)
if len("DealID") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealID\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("DealID")); err != nil {
return err
}
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil {
return err
}
// t.DealProposal (market.DealProposal) (struct)
if len("DealProposal") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"DealProposal\" was too long")
@ -910,7 +912,22 @@ func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) {
}
switch name {
// t.PublishCid (cid.Cid) (struct)
// t.DealID (abi.DealID) (uint64)
case "DealID":
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.DealID = abi.DealID(extra)
}
// t.PublishCid (cid.Cid) (struct)
case "PublishCid":
{
@ -932,21 +949,6 @@ func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) {
t.PublishCid = &c
}
}
// t.DealID (abi.DealID) (uint64)
case "DealID":
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.DealID = abi.DealID(extra)
}
// t.DealProposal (market.DealProposal) (struct)
case "DealProposal":
@ -1140,28 +1142,6 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
return err
}
// t.StartEpoch (abi.ChainEpoch) (int64)
if len("StartEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"StartEpoch\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("StartEpoch")); err != nil {
return err
}
if t.StartEpoch >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil {
return err
}
}
// t.EndEpoch (abi.ChainEpoch) (int64)
if len("EndEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"EndEpoch\" was too long")
@ -1183,6 +1163,28 @@ func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
return err
}
}
// t.StartEpoch (abi.ChainEpoch) (int64)
if len("StartEpoch") > cbg.MaxLength {
return xerrors.Errorf("Value in field \"StartEpoch\" was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
return err
}
if _, err := io.WriteString(w, string("StartEpoch")); err != nil {
return err
}
if t.StartEpoch >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil {
return err
}
}
return nil
}
@ -1224,33 +1226,7 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) {
}
switch name {
// t.StartEpoch (abi.ChainEpoch) (int64)
case "StartEpoch":
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.StartEpoch = abi.ChainEpoch(extraI)
}
// t.EndEpoch (abi.ChainEpoch) (int64)
// t.EndEpoch (abi.ChainEpoch) (int64)
case "EndEpoch":
{
maj, extra, err := cr.ReadHeader()
@ -1267,7 +1243,7 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -1276,6 +1252,32 @@ func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) {
t.EndEpoch = abi.ChainEpoch(extraI)
}
// t.StartEpoch (abi.ChainEpoch) (int64)
case "StartEpoch":
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.StartEpoch = abi.ChainEpoch(extraI)
}
default:
// Field doesn't exist on this type, so ignore it

View File

@ -27,7 +27,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/filestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-jsonrpc/auth"

View File

@ -21,7 +21,7 @@ import (
address "github.com/filecoin-project/go-address"
bitfield "github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
jsonrpc "github.com/filecoin-project/go-jsonrpc"
auth "github.com/filecoin-project/go-jsonrpc/auth"
@ -1296,6 +1296,21 @@ func (mr *MockFullNodeMockRecorder) EthGetTransactionByHash(arg0, arg1 interface
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionByHash", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionByHash), arg0, arg1)
}
// EthGetTransactionByHashLimited mocks base method.
func (m *MockFullNode) EthGetTransactionByHashLimited(arg0 context.Context, arg1 *ethtypes.EthHash, arg2 abi.ChainEpoch) (*ethtypes.EthTx, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EthGetTransactionByHashLimited", arg0, arg1, arg2)
ret0, _ := ret[0].(*ethtypes.EthTx)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// EthGetTransactionByHashLimited indicates an expected call of EthGetTransactionByHashLimited.
func (mr *MockFullNodeMockRecorder) EthGetTransactionByHashLimited(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionByHashLimited", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionByHashLimited), arg0, arg1, arg2)
}
// EthGetTransactionCount mocks base method.
func (m *MockFullNode) EthGetTransactionCount(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 string) (ethtypes.EthUint64, error) {
m.ctrl.T.Helper()
@ -1341,6 +1356,21 @@ func (mr *MockFullNodeMockRecorder) EthGetTransactionReceipt(arg0, arg1 interfac
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionReceipt", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionReceipt), arg0, arg1)
}
// EthGetTransactionReceiptLimited mocks base method.
func (m *MockFullNode) EthGetTransactionReceiptLimited(arg0 context.Context, arg1 ethtypes.EthHash, arg2 abi.ChainEpoch) (*api.EthTxReceipt, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "EthGetTransactionReceiptLimited", arg0, arg1, arg2)
ret0, _ := ret[0].(*api.EthTxReceipt)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// EthGetTransactionReceiptLimited indicates an expected call of EthGetTransactionReceiptLimited.
func (mr *MockFullNodeMockRecorder) EthGetTransactionReceiptLimited(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthGetTransactionReceiptLimited", reflect.TypeOf((*MockFullNode)(nil).EthGetTransactionReceiptLimited), arg0, arg1, arg2)
}
// EthMaxPriorityFeePerGas mocks base method.
func (m *MockFullNode) EthMaxPriorityFeePerGas(arg0 context.Context) (ethtypes.EthBigInt, error) {
m.ctrl.T.Helper()

View File

@ -18,7 +18,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/piecestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
@ -290,12 +290,16 @@ type FullNodeMethods struct {
EthGetTransactionByHash func(p0 context.Context, p1 *ethtypes.EthHash) (*ethtypes.EthTx, error) `perm:"read"`
EthGetTransactionByHashLimited func(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) `perm:"read"`
EthGetTransactionCount func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) `perm:"read"`
EthGetTransactionHashByCid func(p0 context.Context, p1 cid.Cid) (*ethtypes.EthHash, error) `perm:"read"`
EthGetTransactionReceipt func(p0 context.Context, p1 ethtypes.EthHash) (*EthTxReceipt, error) `perm:"read"`
EthGetTransactionReceiptLimited func(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) `perm:"read"`
EthMaxPriorityFeePerGas func(p0 context.Context) (ethtypes.EthBigInt, error) `perm:"read"`
EthNewBlockFilter func(p0 context.Context) (ethtypes.EthFilterID, error) `perm:"write"`
@ -692,18 +696,18 @@ type GatewayMethods struct {
EthGetStorageAt func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 string) (ethtypes.EthBytes, error) ``
EthGetTransactionByBlockHashAndIndex func(p0 context.Context, p1 ethtypes.EthHash, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) ``
EthGetTransactionByBlockNumberAndIndex func(p0 context.Context, p1 ethtypes.EthUint64, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) ``
EthGetTransactionByHash func(p0 context.Context, p1 *ethtypes.EthHash) (*ethtypes.EthTx, error) ``
EthGetTransactionByHashLimited func(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) ``
EthGetTransactionCount func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) ``
EthGetTransactionHashByCid func(p0 context.Context, p1 cid.Cid) (*ethtypes.EthHash, error) ``
EthGetTransactionReceipt func(p0 context.Context, p1 ethtypes.EthHash) (*EthTxReceipt, error) ``
EthGetTransactionReceiptLimited func(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) ``
EthMaxPriorityFeePerGas func(p0 context.Context) (ethtypes.EthBigInt, error) ``
EthNewBlockFilter func(p0 context.Context) (ethtypes.EthFilterID, error) ``
@ -782,6 +786,8 @@ type GatewayMethods struct {
StateVerifiedClientStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) ``
StateVerifierStatus func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) ``
StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) ``
Version func(p0 context.Context) (APIVersion, error) ``
@ -961,7 +967,7 @@ type StorageMinerMethods struct {
MarketListIncompleteDeals func(p0 context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"`
MarketListRetrievalDeals func(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) `perm:"read"`
MarketListRetrievalDeals func(p0 context.Context) ([]struct{}, error) `perm:"read"`
MarketPendingDeals func(p0 context.Context) (PendingDealInfo, error) `perm:"write"`
@ -2282,6 +2288,17 @@ func (s *FullNodeStub) EthGetTransactionByHash(p0 context.Context, p1 *ethtypes.
return nil, ErrNotSupported
}
func (s *FullNodeStruct) EthGetTransactionByHashLimited(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) {
if s.Internal.EthGetTransactionByHashLimited == nil {
return nil, ErrNotSupported
}
return s.Internal.EthGetTransactionByHashLimited(p0, p1, p2)
}
func (s *FullNodeStub) EthGetTransactionByHashLimited(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) {
return nil, ErrNotSupported
}
func (s *FullNodeStruct) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) {
if s.Internal.EthGetTransactionCount == nil {
return *new(ethtypes.EthUint64), ErrNotSupported
@ -2315,6 +2332,17 @@ func (s *FullNodeStub) EthGetTransactionReceipt(p0 context.Context, p1 ethtypes.
return nil, ErrNotSupported
}
func (s *FullNodeStruct) EthGetTransactionReceiptLimited(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) {
if s.Internal.EthGetTransactionReceiptLimited == nil {
return nil, ErrNotSupported
}
return s.Internal.EthGetTransactionReceiptLimited(p0, p1, p2)
}
func (s *FullNodeStub) EthGetTransactionReceiptLimited(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) {
return nil, ErrNotSupported
}
func (s *FullNodeStruct) EthMaxPriorityFeePerGas(p0 context.Context) (ethtypes.EthBigInt, error) {
if s.Internal.EthMaxPriorityFeePerGas == nil {
return *new(ethtypes.EthBigInt), ErrNotSupported
@ -4427,28 +4455,6 @@ func (s *GatewayStub) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress
return *new(ethtypes.EthBytes), ErrNotSupported
}
func (s *GatewayStruct) EthGetTransactionByBlockHashAndIndex(p0 context.Context, p1 ethtypes.EthHash, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) {
if s.Internal.EthGetTransactionByBlockHashAndIndex == nil {
return *new(ethtypes.EthTx), ErrNotSupported
}
return s.Internal.EthGetTransactionByBlockHashAndIndex(p0, p1, p2)
}
func (s *GatewayStub) EthGetTransactionByBlockHashAndIndex(p0 context.Context, p1 ethtypes.EthHash, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) {
return *new(ethtypes.EthTx), ErrNotSupported
}
func (s *GatewayStruct) EthGetTransactionByBlockNumberAndIndex(p0 context.Context, p1 ethtypes.EthUint64, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) {
if s.Internal.EthGetTransactionByBlockNumberAndIndex == nil {
return *new(ethtypes.EthTx), ErrNotSupported
}
return s.Internal.EthGetTransactionByBlockNumberAndIndex(p0, p1, p2)
}
func (s *GatewayStub) EthGetTransactionByBlockNumberAndIndex(p0 context.Context, p1 ethtypes.EthUint64, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) {
return *new(ethtypes.EthTx), ErrNotSupported
}
func (s *GatewayStruct) EthGetTransactionByHash(p0 context.Context, p1 *ethtypes.EthHash) (*ethtypes.EthTx, error) {
if s.Internal.EthGetTransactionByHash == nil {
return nil, ErrNotSupported
@ -4460,6 +4466,17 @@ func (s *GatewayStub) EthGetTransactionByHash(p0 context.Context, p1 *ethtypes.E
return nil, ErrNotSupported
}
func (s *GatewayStruct) EthGetTransactionByHashLimited(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) {
if s.Internal.EthGetTransactionByHashLimited == nil {
return nil, ErrNotSupported
}
return s.Internal.EthGetTransactionByHashLimited(p0, p1, p2)
}
func (s *GatewayStub) EthGetTransactionByHashLimited(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) {
return nil, ErrNotSupported
}
func (s *GatewayStruct) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) {
if s.Internal.EthGetTransactionCount == nil {
return *new(ethtypes.EthUint64), ErrNotSupported
@ -4493,6 +4510,17 @@ func (s *GatewayStub) EthGetTransactionReceipt(p0 context.Context, p1 ethtypes.E
return nil, ErrNotSupported
}
func (s *GatewayStruct) EthGetTransactionReceiptLimited(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) {
if s.Internal.EthGetTransactionReceiptLimited == nil {
return nil, ErrNotSupported
}
return s.Internal.EthGetTransactionReceiptLimited(p0, p1, p2)
}
func (s *GatewayStub) EthGetTransactionReceiptLimited(p0 context.Context, p1 ethtypes.EthHash, p2 abi.ChainEpoch) (*EthTxReceipt, error) {
return nil, ErrNotSupported
}
func (s *GatewayStruct) EthMaxPriorityFeePerGas(p0 context.Context) (ethtypes.EthBigInt, error) {
if s.Internal.EthMaxPriorityFeePerGas == nil {
return *new(ethtypes.EthBigInt), ErrNotSupported
@ -4922,6 +4950,17 @@ func (s *GatewayStub) StateVerifiedClientStatus(p0 context.Context, p1 address.A
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
if s.Internal.StateVerifierStatus == nil {
return nil, ErrNotSupported
}
return s.Internal.StateVerifierStatus(p0, p1, p2)
}
func (s *GatewayStub) StateVerifierStatus(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*abi.StoragePower, error) {
return nil, ErrNotSupported
}
func (s *GatewayStruct) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) {
if s.Internal.StateWaitMsg == nil {
return nil, ErrNotSupported
@ -5736,15 +5775,15 @@ func (s *StorageMinerStub) MarketListIncompleteDeals(p0 context.Context) ([]stor
return *new([]storagemarket.MinerDeal), ErrNotSupported
}
func (s *StorageMinerStruct) MarketListRetrievalDeals(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) {
func (s *StorageMinerStruct) MarketListRetrievalDeals(p0 context.Context) ([]struct{}, error) {
if s.Internal.MarketListRetrievalDeals == nil {
return *new([]retrievalmarket.ProviderDealState), ErrNotSupported
return *new([]struct{}), ErrNotSupported
}
return s.Internal.MarketListRetrievalDeals(p0)
}
func (s *StorageMinerStub) MarketListRetrievalDeals(p0 context.Context) ([]retrievalmarket.ProviderDealState, error) {
return *new([]retrievalmarket.ProviderDealState), ErrNotSupported
func (s *StorageMinerStub) MarketListRetrievalDeals(p0 context.Context) ([]struct{}, error) {
return *new([]struct{}), ErrNotSupported
}
func (s *StorageMinerStruct) MarketPendingDeals(p0 context.Context) (PendingDealInfo, error) {

View File

@ -8,13 +8,15 @@ import (
"github.com/google/uuid"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-graphsync"
"github.com/ipld/go-ipld-prime"
"github.com/ipld/go-ipld-prime/codec/dagjson"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
"github.com/filecoin-project/go-address"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
@ -110,16 +112,12 @@ func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelSta
IsSender: channelState.Sender() == hostID,
Message: channelState.Message(),
}
stringer, ok := channelState.Voucher().(fmt.Stringer)
if ok {
channel.Voucher = stringer.String()
voucher := channelState.Voucher()
voucherJSON, err := ipld.Encode(voucher.Voucher, dagjson.Encode)
if err != nil {
channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error()
} else {
voucherJSON, err := json.Marshal(channelState.Voucher())
if err != nil {
channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error()
} else {
channel.Voucher = string(voucherJSON)
}
channel.Voucher = string(voucherJSON)
}
if channel.IsSender {
channel.IsInitiator = !channelState.IsPull()

View File

@ -10,7 +10,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"

View File

@ -12,7 +12,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"

View File

@ -20,7 +20,7 @@ import (
address "github.com/filecoin-project/go-address"
bitfield "github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket"
auth "github.com/filecoin-project/go-jsonrpc/auth"

View File

@ -601,6 +601,18 @@ func (b *Blockstore) View(ctx context.Context, cid cid.Cid, fn func([]byte) erro
})
}
func (b *Blockstore) Flush(context.Context) error {
if err := b.access(); err != nil {
return err
}
defer b.viewers.Done()
b.lockDB()
defer b.unlockDB()
return b.db.Sync()
}
// Has implements Blockstore.Has.
func (b *Blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
if err := b.access(); err != nil {

View File

@ -18,6 +18,7 @@ type Blockstore interface {
blockstore.Blockstore
blockstore.Viewer
BatchDeleter
Flusher
}
// BasicBlockstore is an alias to the original IPFS Blockstore.
@ -25,6 +26,10 @@ type BasicBlockstore = blockstore.Blockstore
type Viewer = blockstore.Viewer
type Flusher interface {
Flush(context.Context) error
}
type BatchDeleter interface {
DeleteMany(ctx context.Context, cids []cid.Cid) error
}
@ -106,6 +111,13 @@ type adaptedBlockstore struct {
var _ Blockstore = (*adaptedBlockstore)(nil)
func (a *adaptedBlockstore) Flush(ctx context.Context) error {
if flusher, canFlush := a.Blockstore.(Flusher); canFlush {
return flusher.Flush(ctx)
}
return nil
}
func (a *adaptedBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error {
blk, err := a.Get(ctx, cid)
if err != nil {

View File

@ -46,6 +46,8 @@ var (
_ Viewer = (*BufferedBlockstore)(nil)
)
func (bs *BufferedBlockstore) Flush(ctx context.Context) error { return bs.write.Flush(ctx) }
func (bs *BufferedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
a, err := bs.read.AllKeysChan(ctx)
if err != nil {

View File

@ -38,6 +38,10 @@ func (b *discardstore) View(ctx context.Context, cid cid.Cid, f func([]byte) err
return b.bs.View(ctx, cid, f)
}
func (b *discardstore) Flush(ctx context.Context) error {
return nil
}
func (b *discardstore) Put(ctx context.Context, blk blocks.Block) error {
return nil
}

View File

@ -179,3 +179,7 @@ func (b *idstore) Close() error {
}
return nil
}
func (b *idstore) Flush(ctx context.Context) error {
return b.bs.Flush(ctx)
}

View File

@ -17,6 +17,8 @@ func NewMemory() MemBlockstore {
// To match behavior of badger blockstore we index by multihash only.
type MemBlockstore map[string]blocks.Block
func (MemBlockstore) Flush(context.Context) error { return nil }
func (m MemBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error {
delete(m, string(k.Hash()))
return nil

View File

@ -410,6 +410,8 @@ func (n *NetworkStore) HashOnRead(enabled bool) {
return
}
func (*NetworkStore) Flush(context.Context) error { return nil }
func (n *NetworkStore) Stop(ctx context.Context) error {
close(n.closing)

View File

@ -476,6 +476,23 @@ func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
}
}
func (s *SplitStore) Flush(ctx context.Context) error {
s.txnLk.RLock()
defer s.txnLk.RUnlock()
if err := s.cold.Flush(ctx); err != nil {
return err
}
if err := s.hot.Flush(ctx); err != nil {
return err
}
if err := s.ds.Sync(ctx, dstore.Key{}); err != nil {
return err
}
return nil
}
func (s *SplitStore) Put(ctx context.Context, blk blocks.Block) error {
if isIdentiyCid(blk.Cid()) {
return nil

View File

@ -452,7 +452,7 @@ func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) (int64, error)
},
func(c cid.Cid) error {
if s.txnMissing != nil {
log.Warnf("missing object reference %s in %s", c, root)
log.Debugf("missing object reference %s in %s", c, root)
s.txnRefsMx.Lock()
s.txnMissing[c] = struct{}{}
s.txnRefsMx.Unlock()
@ -983,7 +983,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
}
atomic.AddInt64(szWalk, sz)
sz, err := s.walkObject(hdr.ParentMessageReceipts, visitor, fHot)
sz, err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk)
if err != nil {
return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
}

View File

@ -77,6 +77,10 @@ func (es *exposedSplitStore) GetSize(ctx context.Context, c cid.Cid) (int, error
return size, err
}
func (es *exposedSplitStore) Flush(ctx context.Context) error {
return es.s.Flush(ctx)
}
func (es *exposedSplitStore) Put(ctx context.Context, blk blocks.Block) error {
return es.s.Put(ctx, blk)
}

View File

@ -757,6 +757,8 @@ func (b *mockStore) DeleteMany(_ context.Context, cids []cid.Cid) error {
return nil
}
func (b *mockStore) Flush(context.Context) error { return nil }
func (b *mockStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return nil, errors.New("not implemented")
}

View File

@ -20,6 +20,8 @@ type SyncBlockstore struct {
bs MemBlockstore // specifically use a memStore to save indirection overhead.
}
func (*SyncBlockstore) Flush(context.Context) error { return nil }
func (m *SyncBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error {
m.mu.Lock()
defer m.mu.Unlock()

View File

@ -93,6 +93,16 @@ func (t *TimedCacheBlockstore) rotate() {
t.mu.Unlock()
}
func (t *TimedCacheBlockstore) Flush(ctx context.Context) error {
t.mu.Lock()
defer t.mu.Unlock()
if err := t.active.Flush(ctx); err != nil {
return err
}
return t.inactive.Flush(ctx)
}
func (t *TimedCacheBlockstore) Put(ctx context.Context, b blocks.Block) error {
// Don't check the inactive set here. We want to keep this block for at
// least one interval.

View File

@ -55,6 +55,15 @@ func (m unionBlockstore) GetSize(ctx context.Context, cid cid.Cid) (size int, er
return size, err
}
func (m unionBlockstore) Flush(ctx context.Context) (err error) {
for _, bs := range m {
if err = bs.Flush(ctx); err != nil {
break
}
}
return err
}
func (m unionBlockstore) Put(ctx context.Context, block blocks.Block) (err error) {
for _, bs := range m {
if err = bs.Put(ctx, block); err != nil {

View File

@ -69,10 +69,6 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`,
},
DrandIncentinet: {
Servers: []string{
"https://dev1.drand.sh",
"https://dev2.drand.sh",
},
ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`,
},
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -3,14 +3,14 @@ package chain
import (
"fmt"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/build"
)
type BadBlockCache struct {
badBlocks *lru.ARCCache
badBlocks *lru.ARCCache[cid.Cid, BadBlockReason]
}
type BadBlockReason struct {
@ -43,7 +43,7 @@ func (bbr BadBlockReason) String() string {
}
func NewBadBlockCache() *BadBlockCache {
cache, err := lru.NewARC(build.BadBlockCacheSize)
cache, err := lru.NewARC[cid.Cid, BadBlockReason](build.BadBlockCacheSize)
if err != nil {
panic(err) // ok
}
@ -66,10 +66,5 @@ func (bts *BadBlockCache) Purge() {
}
func (bts *BadBlockCache) Has(c cid.Cid) (BadBlockReason, bool) {
rval, ok := bts.badBlocks.Get(c)
if !ok {
return BadBlockReason{}, false
}
return rval.(BadBlockReason), true
return bts.badBlocks.Get(c)
}

View File

@ -8,14 +8,14 @@ import (
dchain "github.com/drand/drand/chain"
dclient "github.com/drand/drand/client"
hclient "github.com/drand/drand/client/http"
"github.com/drand/drand/common/scheme"
dlog "github.com/drand/drand/log"
gclient "github.com/drand/drand/lp2p/client"
"github.com/drand/kyber"
kzap "github.com/go-kit/kit/log/zap"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
logging "github.com/ipfs/go-log/v2"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"go.uber.org/zap/zapcore"
"go.uber.org/zap"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
@ -61,7 +61,7 @@ type DrandBeacon struct {
filGenTime uint64
filRoundTime uint64
localCache *lru.Cache
localCache *lru.Cache[uint64, *types.BeaconEntry]
}
// DrandHTTPClient interface overrides the user agent used by drand
@ -69,6 +69,18 @@ type DrandHTTPClient interface {
SetUserAgent(string)
}
type logger struct {
*zap.SugaredLogger
}
func (l *logger) With(args ...interface{}) dlog.Logger {
return &logger{l.SugaredLogger.With(args...)}
}
func (l *logger) Named(s string) dlog.Logger {
return &logger{l.SugaredLogger.Named(s)}
}
func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes.DrandConfig) (*DrandBeacon, error) {
if genesisTs == 0 {
panic("what are you doing this cant be zero")
@ -79,9 +91,6 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes
return nil, xerrors.Errorf("unable to unmarshal drand chain info: %w", err)
}
dlogger := dlog.NewKitLoggerFrom(kzap.NewZapSugarLogger(
log.SugaredLogger.Desugar(), zapcore.InfoLevel))
var clients []dclient.Client
for _, url := range config.Servers {
hc, err := hclient.NewWithInfo(url, drandChain, nil)
@ -96,7 +105,7 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes
opts := []dclient.Option{
dclient.WithChainInfo(drandChain),
dclient.WithCacheSize(1024),
dclient.WithLogger(dlogger),
dclient.WithLogger(&logger{&log.SugaredLogger}),
}
if ps != nil {
@ -110,7 +119,7 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes
return nil, xerrors.Errorf("creating drand client: %w", err)
}
lc, err := lru.New(1024)
lc, err := lru.New[uint64, *types.BeaconEntry](1024)
if err != nil {
return nil, err
}
@ -160,16 +169,12 @@ func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Re
return out
}
func (db *DrandBeacon) cacheValue(e types.BeaconEntry) {
db.localCache.Add(e.Round, e)
db.localCache.Add(e.Round, &e)
}
func (db *DrandBeacon) getCachedValue(round uint64) *types.BeaconEntry {
v, ok := db.localCache.Get(round)
if !ok {
return nil
}
e, _ := v.(types.BeaconEntry)
return &e
v, _ := db.localCache.Get(round)
return v
}
func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntry) error {
@ -194,7 +199,7 @@ func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntr
Round: curr.Round,
Signature: curr.Data,
}
err := dchain.VerifyBeacon(db.pubkey, b)
err := dchain.NewVerifier(scheme.GetSchemeFromEnv()).VerifyBeacon(*b, db.pubkey)
if err == nil {
db.cacheValue(curr)
}

View File

@ -3,6 +3,7 @@
package drand
import (
"context"
"os"
"testing"
@ -20,11 +21,11 @@ func TestPrintGroupInfo(t *testing.T) {
c, err := hclient.New(server, nil, nil)
assert.NoError(t, err)
cg := c.(interface {
FetchChainInfo(groupHash []byte) (*dchain.Info, error)
FetchChainInfo(ctx context.Context, groupHash []byte) (*dchain.Info, error)
})
chain, err := cg.FetchChainInfo(nil)
chain, err := cg.FetchChainInfo(context.Background(), nil)
assert.NoError(t, err)
err = chain.ToJSON(os.Stdout)
err = chain.ToJSON(os.Stdout, nil)
assert.NoError(t, err)
}

View File

@ -5,7 +5,7 @@ import (
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/filecoin-project/lotus/build"
@ -17,7 +17,7 @@ type blockReceiptTracker struct {
// using an LRU cache because i don't want to handle all the edge cases for
// manual cleanup and maintenance of a fixed size set
cache *lru.Cache
cache *lru.Cache[types.TipSetKey, *peerSet]
}
type peerSet struct {
@ -25,7 +25,7 @@ type peerSet struct {
}
func newBlockReceiptTracker() *blockReceiptTracker {
c, _ := lru.New(512)
c, _ := lru.New[types.TipSetKey, *peerSet](512)
return &blockReceiptTracker{
cache: c,
}
@ -46,20 +46,18 @@ func (brt *blockReceiptTracker) Add(p peer.ID, ts *types.TipSet) {
return
}
val.(*peerSet).peers[p] = build.Clock.Now()
val.peers[p] = build.Clock.Now()
}
func (brt *blockReceiptTracker) GetPeers(ts *types.TipSet) []peer.ID {
brt.lk.Lock()
defer brt.lk.Unlock()
val, ok := brt.cache.Get(ts.Key())
ps, ok := brt.cache.Get(ts.Key())
if !ok {
return nil
}
ps := val.(*peerSet)
out := make([]peer.ID, 0, len(ps.peers))
for p := range ps.peers {
out = append(out, p)

View File

@ -3,6 +3,7 @@ package consensus
import (
"context"
"sync/atomic"
"time"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
@ -113,6 +114,8 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
return sm.VMConstructor()(ctx, vmopt)
}
var cronGas int64
runCron := func(vmCron vm.Interface, epoch abi.ChainEpoch) error {
cronMsg := &types.Message{
To: cron.Address,
@ -130,6 +133,8 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
return xerrors.Errorf("running cron: %w", err)
}
cronGas += ret.GasUsed
if em != nil {
if err := em.MessageApplied(ctx, ts, cronMsg.Cid(), cronMsg, ret, true); err != nil {
return xerrors.Errorf("callback failed on cron message: %w", err)
@ -181,7 +186,9 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
}
}
partDone()
vmEarly := partDone()
earlyCronGas := cronGas
cronGas = 0
partDone = metrics.Timer(ctx, metrics.VMApplyMessages)
vmi, err := makeVm(pstate, epoch, ts.MinTimestamp())
@ -196,6 +203,8 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
processedMsgs = make(map[cid.Cid]struct{})
)
var msgGas int64
for _, b := range bms {
penalty := types.NewInt(0)
gasReward := big.Zero()
@ -210,6 +219,8 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
return cid.Undef, cid.Undef, err
}
msgGas += r.GasUsed
receipts = append(receipts, &r.MessageReceipt)
gasReward = big.Add(gasReward, r.GasCosts.MinerTip)
penalty = big.Add(penalty, r.GasCosts.MinerPenalty)
@ -239,14 +250,14 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
}
}
partDone()
vmMsg := partDone()
partDone = metrics.Timer(ctx, metrics.VMApplyCron)
if err := runCron(vmi, epoch); err != nil {
return cid.Cid{}, cid.Cid{}, err
}
partDone()
vmCron := partDone()
partDone = metrics.Timer(ctx, metrics.VMApplyFlush)
rectarr := blockadt.MakeEmptyArray(sm.ChainStore().ActorStore(ctx))
@ -282,6 +293,11 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
return cid.Undef, cid.Undef, xerrors.Errorf("vm flush failed: %w", err)
}
vmFlush := partDone()
partDone = func() time.Duration { return time.Duration(0) }
log.Infow("ApplyBlocks stats", "early", vmEarly, "earlyCronGas", earlyCronGas, "vmMsg", vmMsg, "msgGas", msgGas, "vmCron", vmCron, "cronGas", cronGas, "vmFlush", vmFlush, "epoch", epoch, "tsk", ts.Key())
stats.Record(ctx, metrics.VMSends.M(int64(atomic.LoadUint64(&vm.StatSends))),
metrics.VMApplied.M(int64(atomic.LoadUint64(&vm.StatApplied))))

View File

@ -4,7 +4,7 @@ import (
"context"
"sync"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/api"
@ -14,14 +14,14 @@ type messageCache struct {
api EventAPI
blockMsgLk sync.Mutex
blockMsgCache *lru.ARCCache
blockMsgCache *lru.ARCCache[cid.Cid, *api.BlockMessages]
}
func newMessageCache(api EventAPI) *messageCache {
blsMsgCache, _ := lru.NewARC(500)
func newMessageCache(a EventAPI) *messageCache {
blsMsgCache, _ := lru.NewARC[cid.Cid, *api.BlockMessages](500)
return &messageCache{
api: api,
api: a,
blockMsgCache: blsMsgCache,
}
}
@ -30,14 +30,14 @@ func (c *messageCache) ChainGetBlockMessages(ctx context.Context, blkCid cid.Cid
c.blockMsgLk.Lock()
defer c.blockMsgLk.Unlock()
msgsI, ok := c.blockMsgCache.Get(blkCid)
msgs, ok := c.blockMsgCache.Get(blkCid)
var err error
if !ok {
msgsI, err = c.api.ChainGetBlockMessages(ctx, blkCid)
msgs, err = c.api.ChainGetBlockMessages(ctx, blkCid)
if err != nil {
return nil, err
}
c.blockMsgCache.Add(blkCid, msgsI)
c.blockMsgCache.Add(blkCid, msgs)
}
return msgsI.(*api.BlockMessages), nil
return msgs, nil
}

View File

@ -34,6 +34,7 @@ import (
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/rand"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@ -256,7 +257,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS
//return nil, xerrors.Errorf("creating drand beacon: %w", err)
//}
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds, index.DummyMsgIndex)
if err != nil {
return nil, xerrors.Errorf("initing stmgr: %w", err)
}

45
chain/index/interface.go Normal file
View File

@ -0,0 +1,45 @@
package index
import (
"context"
"errors"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi"
)
var ErrNotFound = errors.New("message not found")
var ErrClosed = errors.New("index closed")
// MsgInfo is the Message metadata the index tracks.
type MsgInfo struct {
// the message this record refers to
Message cid.Cid
// the tipset where this message was included
TipSet cid.Cid
// the epoch where this message was included
Epoch abi.ChainEpoch
}
// MsgIndex is the interface to the message index
type MsgIndex interface {
// GetMsgInfo retrieves the message metadata through the index.
// The lookup is done using the onchain message Cid; that is the signed message Cid
// for SECP messages and unsigned message Cid for BLS messages.
GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error)
// Close closes the index
Close() error
}
type dummyMsgIndex struct{}
func (dummyMsgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error) {
return MsgInfo{}, ErrNotFound
}
func (dummyMsgIndex) Close() error {
return nil
}
var DummyMsgIndex MsgIndex = dummyMsgIndex{}
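
For orientation, here is a minimal sketch (not part of the diff) of how a consumer might use this interface; the helper name and wrapper function are illustrative assumptions, only `MsgIndex`, `MsgInfo` and `ErrNotFound` come from the code above:

```go
package example

import (
	"context"
	"errors"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/chain/index"
)

// lookupMessageEpoch is a hypothetical helper: it asks the index where a
// message was included and treats "not indexed" as a soft miss rather than a
// hard failure, which is how the state manager consumes the index later in
// this change.
func lookupMessageEpoch(ctx context.Context, idx index.MsgIndex, m cid.Cid) (abi.ChainEpoch, bool, error) {
	info, err := idx.GetMsgInfo(ctx, m)
	switch {
	case err == nil:
		return info.Epoch, true, nil
	case errors.Is(err, index.ErrNotFound):
		// the index may legitimately have incomplete data
		return 0, false, nil
	default:
		return 0, false, err
	}
}
```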

466
chain/index/msgindex.go Normal file
View File

@ -0,0 +1,466 @@
package index
import (
"context"
"database/sql"
"errors"
"io/fs"
"os"
"path"
"sync"
"time"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
_ "github.com/mattn/go-sqlite3"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
)
var log = logging.Logger("msgindex")
var dbName = "msgindex.db"
var dbDefs = []string{
`CREATE TABLE IF NOT EXISTS messages (
cid VARCHAR(80) PRIMARY KEY ON CONFLICT REPLACE,
tipset_cid VARCHAR(80) NOT NULL,
epoch INTEGER NOT NULL
)`,
`CREATE INDEX IF NOT EXISTS tipset_cids ON messages (tipset_cid)
`,
`CREATE TABLE IF NOT EXISTS _meta (
version UINT64 NOT NULL UNIQUE
)`,
`INSERT OR IGNORE INTO _meta (version) VALUES (1)`,
}
var dbPragmas = []string{}
const (
// prepared stmts
dbqGetMessageInfo = "SELECT tipset_cid, epoch FROM messages WHERE cid = ?"
dbqInsertMessage = "INSERT INTO messages VALUES (?, ?, ?)"
dbqDeleteTipsetMessages = "DELETE FROM messages WHERE tipset_cid = ?"
// reconciliation
dbqCountMessages = "SELECT COUNT(*) FROM messages"
dbqMinEpoch = "SELECT MIN(epoch) FROM messages"
dbqCountTipsetMessages = "SELECT COUNT(*) FROM messages WHERE tipset_cid = ?"
dbqDeleteMessagesByEpoch = "DELETE FROM messages WHERE epoch >= ?"
)
// coalescer configuration (TODO: use observer instead)
// these are exposed to make tests snappy
var (
CoalesceMinDelay = time.Second
CoalesceMaxDelay = 15 * time.Second
CoalesceMergeInterval = time.Second
)
// chain store interface; we could use store.ChainStore directly,
// but this simplifies unit testing.
type ChainStore interface {
SubscribeHeadChanges(f store.ReorgNotifee)
MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error)
GetHeaviestTipSet() *types.TipSet
GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error)
}
var _ ChainStore = (*store.ChainStore)(nil)
type msgIndex struct {
cs ChainStore
db *sql.DB
selectMsgStmt *sql.Stmt
insertMsgStmt *sql.Stmt
deleteTipSetStmt *sql.Stmt
sema chan struct{}
mx sync.Mutex
pend []headChange
cancel func()
workers sync.WaitGroup
closeLk sync.RWMutex
closed bool
}
var _ MsgIndex = (*msgIndex)(nil)
type headChange struct {
rev []*types.TipSet
app []*types.TipSet
}
func NewMsgIndex(lctx context.Context, basePath string, cs ChainStore) (MsgIndex, error) {
var (
dbPath string
exists bool
err error
)
err = os.MkdirAll(basePath, 0755)
if err != nil {
return nil, xerrors.Errorf("error creating msgindex base directory: %w", err)
}
dbPath = path.Join(basePath, dbName)
_, err = os.Stat(dbPath)
switch {
case err == nil:
exists = true
case errors.Is(err, fs.ErrNotExist):
case err != nil:
return nil, xerrors.Errorf("error stating msgindex database: %w", err)
}
db, err := sql.Open("sqlite3", dbPath)
if err != nil {
// TODO [nice to have]: automatically delete corrupt databases
// but for now we can just error and let the operator delete.
return nil, xerrors.Errorf("error opening msgindex database: %w", err)
}
if err := prepareDB(db); err != nil {
return nil, xerrors.Errorf("error creating msgindex database: %w", err)
}
// TODO we may consider populating the index when first creating the db
if exists {
if err := reconcileIndex(db, cs); err != nil {
return nil, xerrors.Errorf("error reconciling msgindex database: %w", err)
}
}
ctx, cancel := context.WithCancel(lctx)
msgIndex := &msgIndex{
db: db,
cs: cs,
sema: make(chan struct{}, 1),
cancel: cancel,
}
err = msgIndex.prepareStatements()
if err != nil {
if err := db.Close(); err != nil {
log.Errorf("error closing msgindex database: %s", err)
}
return nil, xerrors.Errorf("error preparing msgindex database statements: %w", err)
}
rnf := store.WrapHeadChangeCoalescer(
msgIndex.onHeadChange,
CoalesceMinDelay,
CoalesceMaxDelay,
CoalesceMergeInterval,
)
cs.SubscribeHeadChanges(rnf)
msgIndex.workers.Add(1)
go msgIndex.background(ctx)
return msgIndex, nil
}
// init utilities
func prepareDB(db *sql.DB) error {
for _, stmt := range dbDefs {
if _, err := db.Exec(stmt); err != nil {
return xerrors.Errorf("error executing sql statement '%s': %w", stmt, err)
}
}
for _, stmt := range dbPragmas {
if _, err := db.Exec(stmt); err != nil {
return xerrors.Errorf("error executing sql statement '%s': %w", stmt, err)
}
}
return nil
}
func reconcileIndex(db *sql.DB, cs ChainStore) error {
// Invariant: after reconciliation, every tipset in the index is in the current chain; ie either
// the chain head or reachable by walking the chain.
// Algorithm:
// 1. Count messages in index; if none, trivially reconciled.
// TODO we may consider populating the index in that case
// 2. Find the minimum tipset in the index; this will mark the end of the reconciliation walk
// 3. Walk from current tipset until we find a tipset in the index.
// 4. Delete (revert!) all tipsets above the found tipset.
// 5. If the walk ends in the boundary epoch, then delete everything.
//
row := db.QueryRow(dbqCountMessages)
var result int64
if err := row.Scan(&result); err != nil {
return xerrors.Errorf("error counting messages: %w", err)
}
if result == 0 {
return nil
}
row = db.QueryRow(dbqMinEpoch)
if err := row.Scan(&result); err != nil {
return xerrors.Errorf("error finding boundary epoch: %w", err)
}
boundaryEpoch := abi.ChainEpoch(result)
countMsgsStmt, err := db.Prepare(dbqCountTipsetMessages)
if err != nil {
return xerrors.Errorf("error preparing statement: %w", err)
}
curTs := cs.GetHeaviestTipSet()
for curTs != nil && curTs.Height() >= boundaryEpoch {
tsCid, err := curTs.Key().Cid()
if err != nil {
return xerrors.Errorf("error computing tipset cid: %w", err)
}
key := tsCid.String()
row = countMsgsStmt.QueryRow(key)
if err := row.Scan(&result); err != nil {
return xerrors.Errorf("error counting messages: %w", err)
}
if result > 0 {
// found it!
boundaryEpoch = curTs.Height() + 1
break
}
// walk up
parents := curTs.Parents()
curTs, err = cs.GetTipSetFromKey(context.TODO(), parents)
if err != nil {
return xerrors.Errorf("error walking chain: %w", err)
}
}
// delete everything above the minEpoch
if _, err = db.Exec(dbqDeleteMessagesByEpoch, int64(boundaryEpoch)); err != nil {
return xerrors.Errorf("error deleting stale reorged out message: %w", err)
}
return nil
}
func (x *msgIndex) prepareStatements() error {
stmt, err := x.db.Prepare(dbqGetMessageInfo)
if err != nil {
return xerrors.Errorf("prepare selectMsgStmt: %w", err)
}
x.selectMsgStmt = stmt
stmt, err = x.db.Prepare(dbqInsertMessage)
if err != nil {
return xerrors.Errorf("prepare insertMsgStmt: %w", err)
}
x.insertMsgStmt = stmt
stmt, err = x.db.Prepare(dbqDeleteTipsetMessages)
if err != nil {
return xerrors.Errorf("prepare deleteTipSetStmt: %w", err)
}
x.deleteTipSetStmt = stmt
return nil
}
// head change notifee
func (x *msgIndex) onHeadChange(rev, app []*types.TipSet) error {
x.closeLk.RLock()
defer x.closeLk.RUnlock()
if x.closed {
return nil
}
// do it in the background to avoid blocking head change processing
x.mx.Lock()
x.pend = append(x.pend, headChange{rev: rev, app: app})
pendLen := len(x.pend)
x.mx.Unlock()
// complain loudly if this is building backlog
if pendLen > 10 {
log.Warnf("message index head change processing is building backlog: %d pending head changes", pendLen)
}
select {
case x.sema <- struct{}{}:
default:
}
return nil
}
func (x *msgIndex) background(ctx context.Context) {
defer x.workers.Done()
for {
select {
case <-x.sema:
err := x.processHeadChanges(ctx)
if err != nil {
// we can't rely on an inconsistent index, so shut it down.
log.Errorf("error processing head change notifications: %s; shutting down message index", err)
if err2 := x.Close(); err2 != nil {
log.Errorf("error shutting down index: %s", err2)
}
}
case <-ctx.Done():
return
}
}
}
func (x *msgIndex) processHeadChanges(ctx context.Context) error {
x.mx.Lock()
pend := x.pend
x.pend = nil
x.mx.Unlock()
tx, err := x.db.Begin()
if err != nil {
return xerrors.Errorf("error creating transaction: %w", err)
}
for _, hc := range pend {
for _, ts := range hc.rev {
if err := x.doRevert(ctx, tx, ts); err != nil {
if err2 := tx.Rollback(); err2 != nil {
log.Errorf("error rolling back transaction: %s", err2)
}
return xerrors.Errorf("error reverting %s: %w", ts, err)
}
}
for _, ts := range hc.app {
if err := x.doApply(ctx, tx, ts); err != nil {
if err2 := tx.Rollback(); err2 != nil {
log.Errorf("error rolling back transaction: %s", err2)
}
return xerrors.Errorf("error applying %s: %w", ts, err)
}
}
}
return tx.Commit()
}
func (x *msgIndex) doRevert(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error {
tskey, err := ts.Key().Cid()
if err != nil {
return xerrors.Errorf("error computing tipset cid: %w", err)
}
key := tskey.String()
_, err = tx.Stmt(x.deleteTipSetStmt).Exec(key)
return err
}
func (x *msgIndex) doApply(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error {
tscid, err := ts.Key().Cid()
if err != nil {
return xerrors.Errorf("error computing tipset cid: %w", err)
}
tskey := tscid.String()
epoch := int64(ts.Height())
msgs, err := x.cs.MessagesForTipset(ctx, ts)
if err != nil {
return xerrors.Errorf("error retrieving messages for tipset %s: %w", ts, err)
}
insertStmt := tx.Stmt(x.insertMsgStmt)
for _, msg := range msgs {
key := msg.Cid().String()
if _, err := insertStmt.Exec(key, tskey, epoch); err != nil {
return xerrors.Errorf("error inserting message: %w", err)
}
}
return nil
}
// interface
func (x *msgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error) {
x.closeLk.RLock()
defer x.closeLk.RUnlock()
if x.closed {
return MsgInfo{}, ErrClosed
}
var (
tipset string
epoch int64
)
key := m.String()
row := x.selectMsgStmt.QueryRow(key)
err := row.Scan(&tipset, &epoch)
switch {
case err == sql.ErrNoRows:
return MsgInfo{}, ErrNotFound
case err != nil:
return MsgInfo{}, xerrors.Errorf("error querying msgindex database: %w", err)
}
tipsetCid, err := cid.Decode(tipset)
if err != nil {
return MsgInfo{}, xerrors.Errorf("error decoding tipset cid: %w", err)
}
return MsgInfo{
Message: m,
TipSet: tipsetCid,
Epoch: abi.ChainEpoch(epoch),
}, nil
}
func (x *msgIndex) Close() error {
x.closeLk.Lock()
defer x.closeLk.Unlock()
if x.closed {
return nil
}
x.closed = true
x.cancel()
x.workers.Wait()
return x.db.Close()
}
// informal apis for itests; not exposed in the main interface
func (x *msgIndex) CountMessages() (int64, error) {
x.closeLk.RLock()
defer x.closeLk.RUnlock()
if x.closed {
return 0, ErrClosed
}
var result int64
row := x.db.QueryRow(dbqCountMessages)
err := row.Scan(&result)
return result, err
}
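
A minimal sketch (not part of the diff) of how a node might wire this up: `NewMsgIndex` takes a base directory and anything satisfying the `ChainStore` interface above, and `*store.ChainStore` satisfies it. The `<repo>/sqlite` location mirrors the lotus-shed tool later in this change and is an assumption here, not an API guarantee:

```go
package example

import (
	"context"
	"path/filepath"

	"github.com/filecoin-project/lotus/chain/index"
	"github.com/filecoin-project/lotus/chain/store"
)

// openMsgIndex opens (and, on first run, creates and reconciles) the
// sqlite-backed message index and subscribes it to head changes via the
// supplied chain store. Illustrative wrapper only.
func openMsgIndex(ctx context.Context, repoPath string, cs *store.ChainStore) (index.MsgIndex, error) {
	return index.NewMsgIndex(ctx, filepath.Join(repoPath, "sqlite"), cs)
}
```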

View File

@ -0,0 +1,298 @@
package index
import (
"context"
"errors"
"math/rand"
"os"
"testing"
"time"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-libipfs/blocks"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/mock"
)
func TestBasicMsgIndex(t *testing.T) {
// the most basic of tests:
// 1. Create an index with mock chain store
// 2. Advance the chain for a few tipsets
// 3. Verify that the index contains all messages with the correct tipset/epoch
cs := newMockChainStore()
cs.genesis()
tmp := t.TempDir()
t.Cleanup(func() { _ = os.RemoveAll(tmp) })
msgIndex, err := NewMsgIndex(context.Background(), tmp, cs)
require.NoError(t, err)
defer msgIndex.Close() //nolint
for i := 0; i < 10; i++ {
t.Logf("advance to epoch %d", i+1)
err := cs.advance()
require.NoError(t, err)
// wait for the coalescer to notify
time.Sleep(CoalesceMinDelay + 10*time.Millisecond)
}
t.Log("verifying index")
verifyIndex(t, cs, msgIndex)
}
func TestReorgMsgIndex(t *testing.T) {
// slightly more nuanced test that includes reorgs
// 1. Create an index with mock chain store
// 2. Advance/Reorg the chain for a few tipsets
// 3. Verify that the index contains all messages with the correct tipset/epoch
cs := newMockChainStore()
cs.genesis()
tmp := t.TempDir()
t.Cleanup(func() { _ = os.RemoveAll(tmp) })
msgIndex, err := NewMsgIndex(context.Background(), tmp, cs)
require.NoError(t, err)
defer msgIndex.Close() //nolint
for i := 0; i < 10; i++ {
t.Logf("advance to epoch %d", i+1)
err := cs.advance()
require.NoError(t, err)
// wait for the coalescer to notify
time.Sleep(CoalesceMinDelay + 10*time.Millisecond)
}
// a simple reorg
t.Log("doing reorg")
reorgme := cs.curTs
reorgmeParent, err := cs.GetTipSetFromKey(context.Background(), reorgme.Parents())
require.NoError(t, err)
cs.setHead(reorgmeParent)
reorgmeChild := cs.makeBlk()
err = cs.reorg([]*types.TipSet{reorgme}, []*types.TipSet{reorgmeChild})
require.NoError(t, err)
time.Sleep(CoalesceMinDelay + 10*time.Millisecond)
t.Log("verifying index")
verifyIndex(t, cs, msgIndex)
t.Log("verifying that reorged messages are not present")
verifyMissing(t, cs, msgIndex, reorgme)
}
func TestReconcileMsgIndex(t *testing.T) {
// test that exercises the reconciliation code paths
// 1. Create and populate a basic msgindex, similar to TestBasicMsgIndex.
// 2. Close it
// 3. Reorg the mock chain store
// 4. Reopen the index to trigger reconciliation
// 5. Ensure that only the stable messages remain.
cs := newMockChainStore()
cs.genesis()
tmp := t.TempDir()
t.Cleanup(func() { _ = os.RemoveAll(tmp) })
msgIndex, err := NewMsgIndex(context.Background(), tmp, cs)
require.NoError(t, err)
for i := 0; i < 10; i++ {
t.Logf("advance to epoch %d", i+1)
err := cs.advance()
require.NoError(t, err)
// wait for the coalescer to notify
time.Sleep(CoalesceMinDelay + 10*time.Millisecond)
}
// Close it and reorg
err = msgIndex.Close()
require.NoError(t, err)
cs.notify = nil
// a simple reorg
t.Log("doing reorg")
reorgme := cs.curTs
reorgmeParent, err := cs.GetTipSetFromKey(context.Background(), reorgme.Parents())
require.NoError(t, err)
cs.setHead(reorgmeParent)
reorgmeChild := cs.makeBlk()
err = cs.reorg([]*types.TipSet{reorgme}, []*types.TipSet{reorgmeChild})
require.NoError(t, err)
// reopen to reconcile
msgIndex, err = NewMsgIndex(context.Background(), tmp, cs)
require.NoError(t, err)
defer msgIndex.Close() //nolint
t.Log("verifying index")
// need to step one up because the last tipset is not known by the index
cs.setHead(reorgmeParent)
verifyIndex(t, cs, msgIndex)
t.Log("verifying that reorged and unknown messages are not present")
verifyMissing(t, cs, msgIndex, reorgme, reorgmeChild)
}
func verifyIndex(t *testing.T, cs *mockChainStore, msgIndex MsgIndex) {
for ts := cs.curTs; ts.Height() > 0; {
t.Logf("verify at height %d", ts.Height())
blks := ts.Blocks()
if len(blks) == 0 {
break
}
tsCid, err := ts.Key().Cid()
require.NoError(t, err)
msgs, err := cs.MessagesForTipset(context.Background(), ts)
require.NoError(t, err)
for _, m := range msgs {
minfo, err := msgIndex.GetMsgInfo(context.Background(), m.Cid())
require.NoError(t, err)
require.Equal(t, tsCid, minfo.TipSet)
require.Equal(t, ts.Height(), minfo.Epoch)
}
parents := ts.Parents()
ts, err = cs.GetTipSetFromKey(context.Background(), parents)
require.NoError(t, err)
}
}
func verifyMissing(t *testing.T, cs *mockChainStore, msgIndex MsgIndex, missing ...*types.TipSet) {
for _, ts := range missing {
msgs, err := cs.MessagesForTipset(context.Background(), ts)
require.NoError(t, err)
for _, m := range msgs {
_, err := msgIndex.GetMsgInfo(context.Background(), m.Cid())
require.Equal(t, ErrNotFound, err)
}
}
}
type mockChainStore struct {
notify store.ReorgNotifee
curTs *types.TipSet
tipsets map[types.TipSetKey]*types.TipSet
msgs map[types.TipSetKey][]types.ChainMsg
nonce uint64
}
var _ ChainStore = (*mockChainStore)(nil)
var systemAddr address.Address
var rng *rand.Rand
func init() {
systemAddr, _ = address.NewIDAddress(0)
rng = rand.New(rand.NewSource(314159))
// adjust those to make tests snappy
CoalesceMinDelay = 100 * time.Millisecond
CoalesceMaxDelay = time.Second
CoalesceMergeInterval = 100 * time.Millisecond
}
func newMockChainStore() *mockChainStore {
return &mockChainStore{
tipsets: make(map[types.TipSetKey]*types.TipSet),
msgs: make(map[types.TipSetKey][]types.ChainMsg),
}
}
func (cs *mockChainStore) genesis() {
genBlock := mock.MkBlock(nil, 0, 0)
genTs := mock.TipSet(genBlock)
cs.msgs[genTs.Key()] = nil
cs.setHead(genTs)
}
func (cs *mockChainStore) setHead(ts *types.TipSet) {
cs.curTs = ts
cs.tipsets[ts.Key()] = ts
}
func (cs *mockChainStore) advance() error {
ts := cs.makeBlk()
return cs.reorg(nil, []*types.TipSet{ts})
}
func (cs *mockChainStore) reorg(rev, app []*types.TipSet) error {
for _, ts := range rev {
parents := ts.Parents()
cs.curTs = cs.tipsets[parents]
}
for _, ts := range app {
cs.tipsets[ts.Key()] = ts
cs.curTs = ts
}
if cs.notify != nil {
return cs.notify(rev, app)
}
return nil
}
func (cs *mockChainStore) makeBlk() *types.TipSet {
height := cs.curTs.Height() + 1
blk := mock.MkBlock(cs.curTs, uint64(height), uint64(height))
blk.Messages = cs.makeGarbageCid()
ts := mock.TipSet(blk)
msg1 := cs.makeMsg()
msg2 := cs.makeMsg()
cs.msgs[ts.Key()] = []types.ChainMsg{msg1, msg2}
return ts
}
func (cs *mockChainStore) makeMsg() *types.Message {
nonce := cs.nonce
cs.nonce++
return &types.Message{To: systemAddr, From: systemAddr, Nonce: nonce}
}
func (cs *mockChainStore) makeGarbageCid() cid.Cid {
garbage := blocks.NewBlock([]byte{byte(rng.Intn(256)), byte(rng.Intn(256)), byte(rng.Intn(256))})
return garbage.Cid()
}
func (cs *mockChainStore) SubscribeHeadChanges(f store.ReorgNotifee) {
cs.notify = f
}
func (cs *mockChainStore) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) {
msgs, ok := cs.msgs[ts.Key()]
if !ok {
return nil, errors.New("unknown tipset")
}
return msgs, nil
}
func (cs *mockChainStore) GetHeaviestTipSet() *types.TipSet {
return cs.curTs
}
func (cs *mockChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
ts, ok := cs.tipsets[tsk]
if !ok {
return nil, errors.New("unknown tipset")
}
return ts, nil
}

View File

@ -12,7 +12,7 @@ import (
"time"
"github.com/hashicorp/go-multierror"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
@ -159,7 +159,7 @@ type MessagePool struct {
// pruneCooldown is a channel used to allow a cooldown time between prunes
pruneCooldown chan struct{}
blsSigCache *lru.TwoQueueCache
blsSigCache *lru.TwoQueueCache[cid.Cid, crypto.Signature]
changes *lps.PubSub
@ -167,9 +167,9 @@ type MessagePool struct {
netName dtypes.NetworkName
sigValCache *lru.TwoQueueCache
sigValCache *lru.TwoQueueCache[string, struct{}]
nonceCache *lru.Cache
nonceCache *lru.Cache[nonceCacheKey, uint64]
evtTypes [3]journal.EventType
journal journal.Journal
@ -369,9 +369,9 @@ func (ms *msgSet) toSlice() []*types.SignedMessage {
}
func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) {
cache, _ := lru.New2Q(build.BlsSignatureCacheSize)
verifcache, _ := lru.New2Q(build.VerifSigCacheSize)
noncecache, _ := lru.New(256)
cache, _ := lru.New2Q[cid.Cid, crypto.Signature](build.BlsSignatureCacheSize)
verifcache, _ := lru.New2Q[string, struct{}](build.VerifSigCacheSize)
noncecache, _ := lru.New[nonceCacheKey, uint64](256)
cfg, err := loadConfig(ctx, ds)
if err != nil {
@ -1053,7 +1053,7 @@ func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address,
n, ok := mp.nonceCache.Get(nk)
if ok {
return n.(uint64), nil
return n, nil
}
act, err := mp.api.GetActorAfter(addr, ts)
@ -1473,15 +1473,10 @@ func (mp *MessagePool) MessagesForBlocks(ctx context.Context, blks []*types.Bloc
}
func (mp *MessagePool) RecoverSig(msg *types.Message) *types.SignedMessage {
val, ok := mp.blsSigCache.Get(msg.Cid())
sig, ok := mp.blsSigCache.Get(msg.Cid())
if !ok {
return nil
}
sig, ok := val.(crypto.Signature)
if !ok {
log.Errorf("value in signature cache was not a signature (got %T)", val)
return nil
}
return &types.SignedMessage{
Message: *msg,

View File

@ -8,6 +8,7 @@ import (
"go.opencensus.io/trace"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
)
@ -52,6 +53,22 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
sm.stlk.Unlock()
if ts.Height() == 0 {
// NB: This is here because the process that executes blocks requires that the
// block miner reference a valid miner in the state tree. Unless we create some
// magical genesis miner, this won't work properly, so we short circuit here
// This avoids the question of 'who gets paid the genesis block reward'.
// This also makes us not attempt to lookup the tipset state with
// tryLookupTipsetState, which would cause a very long, very slow walk.
return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil
}
// First, try to find the tipset in the current chain. If found, we can avoid re-executing
// it.
if st, rec, found := tryLookupTipsetState(ctx, sm.cs, ts); found {
return st, rec, nil
}
st, rec, err = sm.tsExec.ExecuteTipSet(ctx, sm, ts, sm.tsExecMonitor, false)
if err != nil {
return cid.Undef, cid.Undef, err
@ -60,6 +77,51 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
return st, rec, nil
}
// Try to lookup a state & receipt CID for a given tipset by walking the chain instead of executing
// it. This will only successfully return the state/receipt CIDs if they're found in the state
// store.
//
// NOTE: This _won't_ recursively walk the receipt/state trees. It assumes that having the root
// implies having the rest of the tree. However, lotus generally makes that assumption anyways.
func tryLookupTipsetState(ctx context.Context, cs *store.ChainStore, ts *types.TipSet) (cid.Cid, cid.Cid, bool) {
nextTs, err := cs.GetTipsetByHeight(ctx, ts.Height()+1, nil, false)
if err != nil {
// Nothing to see here. The requested height may be beyond the current head.
return cid.Undef, cid.Undef, false
}
// Make sure we're on the correct fork.
if nextTs.Parents() != ts.Key() {
// Also nothing to see here. This just means that the requested tipset is on a
// different fork.
return cid.Undef, cid.Undef, false
}
stateCid := nextTs.ParentState()
receiptCid := nextTs.ParentMessageReceipts()
// Make sure we have the parent state.
if hasState, err := cs.StateBlockstore().Has(ctx, stateCid); err != nil {
log.Errorw("failed to lookup state-root in blockstore", "cid", stateCid, "error", err)
return cid.Undef, cid.Undef, false
} else if !hasState {
// We have the chain but don't have the state. It looks like we need to try
// executing?
return cid.Undef, cid.Undef, false
}
// Make sure we have the receipts.
if hasReceipts, err := cs.ChainBlockstore().Has(ctx, receiptCid); err != nil {
log.Errorw("failed to lookup receipts in blockstore", "cid", receiptCid, "error", err)
return cid.Undef, cid.Undef, false
} else if !hasReceipts {
// If we don't have the receipts, re-execute and try again.
return cid.Undef, cid.Undef, false
}
return stateCid, receiptCid, true
}
func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, error) {
st, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, ts, em, true)
return st, err

View File

@ -36,6 +36,7 @@ import (
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
. "github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
@ -168,7 +169,7 @@ func TestForkHeightTriggers(t *testing.T) {
}
return st.Flush(ctx)
}}}, cg.BeaconSchedule(), datastore.NewMapDatastore())
}}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex)
if err != nil {
t.Fatal(err)
}
@ -286,7 +287,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
migrationCount++
return root, nil
}}}, cg.BeaconSchedule(), datastore.NewMapDatastore())
}}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex)
if err != nil {
t.Fatal(err)
}
@ -504,7 +505,7 @@ func TestForkPreMigration(t *testing.T) {
return nil
},
}}},
}, cg.BeaconSchedule(), datastore.NewMapDatastore())
}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex)
if err != nil {
t.Fatal(err)
}
@ -579,6 +580,7 @@ func TestDisablePreMigration(t *testing.T) {
},
cg.BeaconSchedule(),
datastore.NewMapDatastore(),
index.DummyMsgIndex,
)
require.NoError(t, err)
require.NoError(t, sm.Start(context.Background()))
@ -633,6 +635,7 @@ func TestMigrtionCache(t *testing.T) {
},
cg.BeaconSchedule(),
metadataDs,
index.DummyMsgIndex,
)
require.NoError(t, err)
require.NoError(t, sm.Start(context.Background()))
@ -685,6 +688,7 @@ func TestMigrtionCache(t *testing.T) {
},
cg.BeaconSchedule(),
metadataDs,
index.DummyMsgIndex,
)
require.NoError(t, err)
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {

View File

@ -10,6 +10,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
)
@ -18,6 +19,7 @@ import (
// happened, with an optional limit to how many epochs it will search. It guarantees that the message has been on
// chain for at least confidence epochs without being reverted before returning.
func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confidence uint64, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
// TODO use the index to speed this up.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@ -145,7 +147,30 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet
return head, r, foundMsg, nil
}
fts, r, foundMsg, err := sm.searchBackForMsg(ctx, head, msg, lookbackLimit, allowReplaced)
fts, r, foundMsg, err := sm.searchForIndexedMsg(ctx, mcid, msg)
switch {
case err == nil:
if r != nil && foundMsg.Defined() {
return fts, r, foundMsg, nil
}
// debug log this, it's noteworthy
if r == nil {
log.Debugf("missing receipt for message in index for %s", mcid)
}
if !foundMsg.Defined() {
log.Debugf("message %s not found", mcid)
}
case errors.Is(err, index.ErrNotFound):
// ok for the index to have incomplete data
default:
log.Warnf("error searching message index: %s", err)
}
fts, r, foundMsg, err = sm.searchBackForMsg(ctx, head, msg, lookbackLimit, allowReplaced)
if err != nil {
log.Warnf("failed to look back through chain for message %s", mcid)
@ -159,6 +184,41 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet
return fts, r, foundMsg, nil
}
func (sm *StateManager) searchForIndexedMsg(ctx context.Context, mcid cid.Cid, m types.ChainMsg) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) {
minfo, err := sm.msgIndex.GetMsgInfo(ctx, mcid)
if err != nil {
return nil, nil, cid.Undef, xerrors.Errorf("error looking up message in index: %w", err)
}
// check the height against the current tipset; minimum execution confidence requires that the
// inclusion tipset height is lower than the current head + 1
curTs := sm.cs.GetHeaviestTipSet()
if curTs.Height() <= minfo.Epoch+1 {
return nil, nil, cid.Undef, xerrors.Errorf("indexed message does not appear before the current tipset; index epoch: %d, current epoch: %d", minfo.Epoch, curTs.Height())
}
// now get the execution tipset
// TODO optimization: the index should have it implicitly so we can return it in the msginfo.
xts, err := sm.cs.GetTipsetByHeight(ctx, minfo.Epoch+1, curTs, false)
if err != nil {
return nil, nil, cid.Undef, xerrors.Errorf("error looking up execution tipset: %w", err)
}
// check that the parent of the execution index is indeed the inclusion tipset
parent := xts.Parents()
parentCid, err := parent.Cid()
if err != nil {
return nil, nil, cid.Undef, xerrors.Errorf("error computing tipset cid: %w", err)
}
if !parentCid.Equals(minfo.TipSet) {
return nil, nil, cid.Undef, xerrors.Errorf("inclusion tipset mismatch: have %s, expected %s", parentCid, minfo.TipSet)
}
r, foundMsg, err := sm.tipsetExecutedMessage(ctx, xts, mcid, m.VMMessage(), false)
if err != nil {
return nil, nil, cid.Undef, xerrors.Errorf("error in tipsetExecutedMessage: %w", err)
}
return xts, r, foundMsg, nil
}
// searchBackForMsg searches up to limit tipsets backwards from the given
// tipset for a message receipt.
// If limit is

View File

@ -25,6 +25,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
"github.com/filecoin-project/lotus/chain/actors/policy"
"github.com/filecoin-project/lotus/chain/beacon"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/rand"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/store"
@ -136,6 +137,8 @@ type StateManager struct {
tsExec Executor
tsExecMonitor ExecMonitor
beacon beacon.Schedule
msgIndex index.MsgIndex
}
// Caches a single state tree
@ -144,7 +147,7 @@ type treeCache struct {
tree *state.StateTree
}
func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, metadataDs dstore.Batching) (*StateManager, error) {
func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, metadataDs dstore.Batching, msgIndex index.MsgIndex) (*StateManager, error) {
// If we have upgrades, make sure they're in-order and make sense.
if err := us.Validate(); err != nil {
return nil, err
@ -199,11 +202,12 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder,
tree: nil,
},
compWait: make(map[string]chan struct{}),
msgIndex: msgIndex,
}, nil
}
func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching) (*StateManager, error) {
sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs)
func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching, msgIndex index.MsgIndex) (*StateManager, error) {
sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs, msgIndex)
if err != nil {
return nil, err
}

View File

@ -207,9 +207,7 @@ type mmCids struct {
}
func (cs *ChainStore) ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) {
o, ok := cs.mmCache.Get(mmc)
if ok {
mmcids := o.(*mmCids)
if mmcids, ok := cs.mmCache.Get(mmc); ok {
return mmcids.bls, mmcids.secpk, nil
}
@ -229,7 +227,7 @@ func (cs *ChainStore) ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.C
return nil, nil, xerrors.Errorf("loading secpk message cids for block: %w", err)
}
cs.mmCache.Add(mmc, &mmCids{
cs.mmCache.Add(mmc, mmCids{
bls: blscids,
secpk: secpkcids,
})
@ -237,6 +235,26 @@ func (cs *ChainStore) ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.C
return blscids, secpkcids, nil
}
func (cs *ChainStore) ReadReceipts(ctx context.Context, root cid.Cid) ([]types.MessageReceipt, error) {
a, err := blockadt.AsArray(cs.ActorStore(ctx), root)
if err != nil {
return nil, err
}
receipts := make([]types.MessageReceipt, 0, a.Length())
var rcpt types.MessageReceipt
if err := a.ForEach(&rcpt, func(i int64) error {
if int64(len(receipts)) != i {
return xerrors.Errorf("missing receipt %d", i)
}
receipts = append(receipts, rcpt)
return nil
}); err != nil {
return nil, err
}
return receipts, nil
}
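
For context, a small sketch (not part of the diff) pairing the new `ReadReceipts` helper with `ParentMessageReceipts`: the receipts produced by executing a tipset are rooted in the headers of its child, so the child tipset is where the root is read from. The helper and its assumption that `childTs` is the direct child of the tipset of interest are illustrative:

```go
package example

import (
	"context"

	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
)

// receiptsForParentTipset loads the receipts for the messages executed in
// childTs's parent tipset by reading the receipt root recorded in childTs.
func receiptsForParentTipset(ctx context.Context, cs *store.ChainStore, childTs *types.TipSet) ([]types.MessageReceipt, error) {
	return cs.ReadReceipts(ctx, childTs.ParentMessageReceipts())
}
```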
func (cs *ChainStore) MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) {
blscids, secpkcids, err := cs.ReadMsgMetaCids(ctx, b.Messages)
if err != nil {

View File

@ -11,7 +11,7 @@ import (
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/ipfs/go-cid"
dstore "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
@ -120,8 +120,8 @@ type ChainStore struct {
reorgCh chan<- reorg
reorgNotifeeCh chan ReorgNotifee
mmCache *lru.ARCCache // msg meta cache (mh.Messages -> secp, bls []cid)
tsCache *lru.ARCCache
mmCache *lru.ARCCache[cid.Cid, mmCids]
tsCache *lru.ARCCache[types.TipSetKey, *types.TipSet]
evtTypes [1]journal.EventType
journal journal.Journal
@ -133,8 +133,8 @@ type ChainStore struct {
}
func NewChainStore(chainBs bstore.Blockstore, stateBs bstore.Blockstore, ds dstore.Batching, weight WeightFunc, j journal.Journal) *ChainStore {
c, _ := lru.NewARC(DefaultMsgMetaCacheSize)
tsc, _ := lru.NewARC(DefaultTipSetCacheSize)
c, _ := lru.NewARC[cid.Cid, mmCids](DefaultMsgMetaCacheSize)
tsc, _ := lru.NewARC[types.TipSetKey, *types.TipSet](DefaultTipSetCacheSize)
if j == nil {
j = journal.NilJournal()
}
@ -818,9 +818,8 @@ func (cs *ChainStore) GetBlock(ctx context.Context, c cid.Cid) (*types.BlockHead
}
func (cs *ChainStore) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) {
v, ok := cs.tsCache.Get(tsk)
if ok {
return v.(*types.TipSet), nil
if ts, ok := cs.tsCache.Get(tsk); ok {
return ts, nil
}
// Fetch tipset block headers from blockstore in parallel

View File

@ -18,6 +18,7 @@ import (
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -214,7 +215,7 @@ func TestChainExportImportFull(t *testing.T) {
t.Fatal("imported chain differed from exported chain")
}
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds, index.DummyMsgIndex)
if err != nil {
t.Fatal(err)
}

View File

@ -7,11 +7,12 @@ import (
"sync"
"time"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
bserv "github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
logging "github.com/ipfs/go-log/v2"
"github.com/ipni/storetheindex/announce/message"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/connmgr"
"github.com/libp2p/go-libp2p/core/peer"
@ -20,7 +21,6 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-legs/dtsync"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
@ -217,7 +217,7 @@ func fetchCids(
type BlockValidator struct {
self peer.ID
peers *lru.TwoQueueCache
peers *lru.TwoQueueCache[peer.ID, int]
killThresh int
@ -231,7 +231,7 @@ type BlockValidator struct {
}
func NewBlockValidator(self peer.ID, chain *store.ChainStore, cns consensus.Consensus, blacklist func(peer.ID)) *BlockValidator {
p, _ := lru.New2Q(4096)
p, _ := lru.New2Q[peer.ID, int](4096)
return &BlockValidator{
self: self,
peers: p,
@ -244,21 +244,19 @@ func NewBlockValidator(self peer.ID, chain *store.ChainStore, cns consensus.Cons
}
func (bv *BlockValidator) flagPeer(p peer.ID) {
v, ok := bv.peers.Get(p)
val, ok := bv.peers.Get(p)
if !ok {
bv.peers.Add(p, int(1))
bv.peers.Add(p, 1)
return
}
val := v.(int)
if val >= bv.killThresh {
log.Warnf("blacklisting peer %s", p)
bv.blacklist(p)
return
}
bv.peers.Add(p, v.(int)+1)
bv.peers.Add(p, val+1)
}
func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) (res pubsub.ValidationResult) {
@ -293,11 +291,11 @@ func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub
}
type blockReceiptCache struct {
blocks *lru.TwoQueueCache
blocks *lru.TwoQueueCache[cid.Cid, int]
}
func newBlockReceiptCache() *blockReceiptCache {
c, _ := lru.New2Q(8192)
c, _ := lru.New2Q[cid.Cid, int](8192)
return &blockReceiptCache{
blocks: c,
@ -307,12 +305,12 @@ func newBlockReceiptCache() *blockReceiptCache {
func (brc *blockReceiptCache) add(bcid cid.Cid) int {
val, ok := brc.blocks.Get(bcid)
if !ok {
brc.blocks.Add(bcid, int(1))
brc.blocks.Add(bcid, 1)
return 0
}
brc.blocks.Add(bcid, val.(int)+1)
return val.(int)
brc.blocks.Add(bcid, val+1)
return val
}
type MessageValidator struct {
@ -466,13 +464,13 @@ type peerMsgInfo struct {
type IndexerMessageValidator struct {
self peer.ID
peerCache *lru.TwoQueueCache
peerCache *lru.TwoQueueCache[address.Address, *peerMsgInfo]
chainApi full.ChainModuleAPI
stateApi full.StateModuleAPI
}
func NewIndexerMessageValidator(self peer.ID, chainApi full.ChainModuleAPI, stateApi full.StateModuleAPI) *IndexerMessageValidator {
peerCache, _ := lru.New2Q(8192)
peerCache, _ := lru.New2Q[address.Address, *peerMsgInfo](8192)
return &IndexerMessageValidator{
self: self,
@ -497,7 +495,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
return pubsub.ValidationIgnore
}
idxrMsg := dtsync.Message{}
idxrMsg := message.Message{}
err := idxrMsg.UnmarshalCBOR(bytes.NewBuffer(msg.Data))
if err != nil {
log.Errorw("Could not decode indexer pubsub message", "err", err)
@ -515,15 +513,12 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
return pubsub.ValidationReject
}
minerID := minerAddr.String()
msgCid := idxrMsg.Cid
var msgInfo *peerMsgInfo
val, ok := v.peerCache.Get(minerID)
msgInfo, ok := v.peerCache.Get(minerAddr)
if !ok {
msgInfo = &peerMsgInfo{}
} else {
msgInfo = val.(*peerMsgInfo)
}
// Lock this peer's message info.
@ -544,7 +539,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
// Check that the miner ID maps to the peer that sent the message.
err = v.authenticateMessage(ctx, minerAddr, originPeer)
if err != nil {
log.Warnw("cannot authenticate messsage", "err", err, "peer", originPeer, "minerID", minerID)
log.Warnw("cannot authenticate messsage", "err", err, "peer", originPeer, "minerID", minerAddr)
stats.Record(ctx, metrics.IndexerMessageValidationFailure.M(1))
return pubsub.ValidationReject
}
@ -554,7 +549,7 @@ func (v *IndexerMessageValidator) Validate(ctx context.Context, pid peer.ID, msg
// messages from the same peer are handled concurrently, there is a
// small chance that one msgInfo could replace the other here when
// the info is first cached. This is OK, so no need to prevent it.
v.peerCache.Add(minerID, msgInfo)
v.peerCache.Add(minerAddr, msgInfo)
}
}

View File

@ -9,12 +9,12 @@ import (
"github.com/golang/mock/gomock"
"github.com/ipfs/go-cid"
blocks "github.com/ipfs/go-libipfs/blocks"
"github.com/ipni/storetheindex/announce/message"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-legs/dtsync"
"github.com/filecoin-project/lotus/api/mocks"
"github.com/filecoin-project/lotus/chain/types"
@ -105,7 +105,7 @@ func TestIndexerMessageValidator_Validate(t *testing.T) {
mc := gomock.NewController(t)
node := mocks.NewMockFullNode(mc)
subject := NewIndexerMessageValidator(peer.ID(tc.selfPID), node, node)
message := dtsync.Message{
message := message.Message{
Cid: validCid,
Addrs: nil,
ExtraData: tc.extraData,

View File

@ -340,7 +340,7 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -618,7 +618,7 @@ func (t *ElectionProof) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -828,7 +828,7 @@ func (t *Message) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -1591,7 +1591,7 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -2202,7 +2202,7 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -2227,7 +2227,7 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -2252,7 +2252,7 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -2277,7 +2277,7 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -2534,7 +2534,7 @@ func (t *ReturnTrace) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:

View File

@ -44,14 +44,6 @@ type EthTx struct {
S EthBigInt `json:"s"`
}
func (tx *EthTx) Reward(blkBaseFee big.Int) EthBigInt {
availablePriorityFee := big.Sub(big.Int(tx.MaxFeePerGas), blkBaseFee)
if big.Cmp(big.Int(tx.MaxPriorityFeePerGas), availablePriorityFee) <= 0 {
return tx.MaxPriorityFeePerGas
}
return EthBigInt(availablePriorityFee)
}
type EthTxArgs struct {
ChainID int `json:"chainId"`
Nonce int `json:"nonce"`

View File

@ -295,17 +295,21 @@ func EthAddressFromPubKey(pubk []byte) ([]byte, error) {
return ethAddr, nil
}
var maskedIDPrefix = [20 - 8]byte{0xff}
func IsEthAddress(addr address.Address) bool {
if addr.Protocol() != address.Delegated {
return false
}
payload := addr.Payload()
namespace, _, err := varint.FromUvarint(payload)
namespace, offset, err := varint.FromUvarint(payload)
if err != nil {
return false
}
return namespace == builtintypes.EthereumAddressManagerActorID
payload = payload[offset:]
return namespace == builtintypes.EthereumAddressManagerActorID && len(payload) == 20 && !bytes.HasPrefix(payload, maskedIDPrefix[:])
}
func EthAddressFromFilecoinAddress(addr address.Address) (EthAddress, error) {
@ -326,9 +330,17 @@ func EthAddressFromFilecoinAddress(addr address.Address) (EthAddress, error) {
return EthAddress{}, xerrors.Errorf("invalid delegated address namespace in: %s", addr)
}
payload = payload[n:]
if namespace == builtintypes.EthereumAddressManagerActorID {
return CastEthAddress(payload)
if namespace != builtintypes.EthereumAddressManagerActorID {
return EthAddress{}, ErrInvalidAddress
}
ethAddr, err := CastEthAddress(payload)
if err != nil {
return EthAddress{}, err
}
if ethAddr.IsMaskedID() {
return EthAddress{}, xerrors.Errorf("f410f addresses cannot embed masked-ID payloads: %s", ethAddr)
}
return ethAddr, nil
}
return EthAddress{}, ErrInvalidAddress
}
@ -376,8 +388,7 @@ func (ea *EthAddress) UnmarshalJSON(b []byte) error {
}
func (ea EthAddress) IsMaskedID() bool {
idmask := [12]byte{0xff}
return bytes.Equal(ea[:12], idmask[:])
return bytes.HasPrefix(ea[:], maskedIDPrefix[:])
}
func (ea EthAddress) ToFilecoinAddress() (address.Address, error) {

View File

@ -9,6 +9,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/builtin"
)
type TestCase struct {
@ -178,6 +179,20 @@ func TestParseEthAddr(t *testing.T) {
}
}
func TestMaskedIDInF4(t *testing.T) {
addr, err := address.NewIDAddress(100)
require.NoError(t, err)
eaddr, err := EthAddressFromFilecoinAddress(addr)
require.NoError(t, err)
badaddr, err := address.NewDelegatedAddress(builtin.EthereumAddressManagerActorID, eaddr[:])
require.NoError(t, err)
_, err = EthAddressFromFilecoinAddress(badaddr)
require.Error(t, err)
}
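
For illustration, a sketch (not part of the diff) of the masked-ID layout that the change above refuses to accept inside f410f payloads, following the `maskedIDPrefix` definition: one 0xff byte, eleven zero bytes, then the actor ID as a big-endian uint64. The helper is an assumption built from that definition, not an exported API:

```go
package example

import (
	"encoding/binary"

	"github.com/filecoin-project/lotus/chain/types/ethtypes"
)

// maskedIDEthAddress builds the 0xff-prefixed "masked ID" form of an actor ID.
// Addresses of this shape now cause EthAddressFromFilecoinAddress to reject
// the embedding f410f address.
func maskedIDEthAddress(id uint64) ethtypes.EthAddress {
	var ea ethtypes.EthAddress
	ea[0] = 0xff
	binary.BigEndian.PutUint64(ea[12:], id)
	return ea
}
```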
func TestUnmarshalEthCall(t *testing.T) {
data := `{"from":"0x4D6D86b31a112a05A473c4aE84afaF873f632325","to":"0xFe01CC39f5Ae8553D6914DBb9dC27D219fa22D7f","gas":"0x5","gasPrice":"0x6","value":"0x123","data":""}`

View File

@ -219,4 +219,17 @@ func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version)
return nil
}
// EffectiveGasPremium returns the effective gas premium claimable by the miner
// given the supplied base fee.
//
// Filecoin clamps the gas premium at GasFeeCap - BaseFee, if lower than the
// specified premium.
func (m *Message) EffectiveGasPremium(baseFee abi.TokenAmount) abi.TokenAmount {
available := big.Sub(m.GasFeeCap, baseFee)
if big.Cmp(m.GasPremium, available) <= 0 {
return m.GasPremium
}
return available
}
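
A small worked example (not part of the diff) of the clamping described above, with illustrative numbers only:

```go
package example

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/lotus/chain/types"
)

// With GasFeeCap=100, GasPremium=10 and a base fee of 95, the claimable
// premium is clamped to GasFeeCap-BaseFee = 5 rather than the requested 10;
// with a base fee of 80 the full premium of 10 applies.
func exampleEffectivePremium() {
	msg := types.Message{
		GasFeeCap:  big.NewInt(100),
		GasPremium: big.NewInt(10),
	}
	fmt.Println(msg.EffectiveGasPremium(big.NewInt(95))) // 5
	fmt.Println(msg.EffectiveGasPremium(big.NewInt(80))) // 10
}
```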
const TestGasLimit = 100e6

View File

@ -234,6 +234,10 @@ func (ts *TipSet) MinTicketBlock() *BlockHeader {
return min
}
func (ts *TipSet) ParentMessageReceipts() cid.Cid {
return ts.blks[0].ParentMessageReceipts
}
func (ts *TipSet) ParentState() cid.Cid {
return ts.blks[0].ParentStateRoot
}

View File

@ -31,7 +31,7 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"

View File

@ -35,6 +35,7 @@ var EvmCmd = &cli.Command{
EvmGetInfoCmd,
EvmCallSimulateCmd,
EvmGetContractAddress,
EvmGetBytecode,
},
}
@ -486,3 +487,51 @@ func ethAddrFromFilecoinAddress(ctx context.Context, addr address.Address, fnapi
return ethAddr, faddr, nil
}
var EvmGetBytecode = &cli.Command{
Name: "bytecode",
Usage: "Write the bytecode of a smart contract to a file",
ArgsUsage: "[contract-address] [file-name]",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "bin",
Usage: "write the bytecode as raw binary and don't hex-encode",
},
},
Action: func(cctx *cli.Context) error {
if cctx.NArg() != 2 {
return IncorrectNumArgs(cctx)
}
contractAddr, err := ethtypes.ParseEthAddress(cctx.Args().Get(0))
if err != nil {
return err
}
fileName := cctx.Args().Get(1)
api, closer, err := GetFullNodeAPIV1(cctx)
if err != nil {
return err
}
defer closer()
ctx := ReqContext(cctx)
code, err := api.EthGetCode(ctx, contractAddr, "latest")
if err != nil {
return err
}
if !cctx.Bool("bin") {
newCode := make([]byte, hex.EncodedLen(len(code)))
hex.Encode(newCode, code)
code = newCode
}
if err := os.WriteFile(fileName, code, 0o666); err != nil {
return xerrors.Errorf("failed to write bytecode to file %s: %w", fileName, err)
}
fmt.Printf("Code for %s written to %s\n", contractAddr, fileName)
return nil
},
}

View File

@ -36,6 +36,7 @@ import (
badgerbs "github.com/filecoin-project/lotus/blockstore/badger"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -229,7 +230,7 @@ var importBenchCmd = &cli.Command{
defer cs.Close() //nolint:errcheck
// TODO: We need to supply the actual beacon after v14
stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil, metadataDs)
stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil, metadataDs, index.DummyMsgIndex)
if err != nil {
return err
}

View File

@ -31,7 +31,7 @@ var configDefaultCmd = &cli.Command{
Action: func(cctx *cli.Context) error {
c := config.DefaultStorageMiner()
cb, err := config.ConfigUpdate(c, nil, !cctx.Bool("no-comment"))
cb, err := config.ConfigUpdate(c, nil, config.Commented(!cctx.Bool("no-comment")))
if err != nil {
return err
}
@ -83,7 +83,7 @@ var configUpdateCmd = &cli.Command{
cfgDef := config.DefaultStorageMiner()
updated, err := config.ConfigUpdate(cfgNode, cfgDef, !cctx.Bool("no-comment"))
updated, err := config.ConfigUpdate(cfgNode, cfgDef, config.Commented(!cctx.Bool("no-comment")))
if err != nil {
return err
}

View File

@ -150,11 +150,6 @@ var infoAllCmd = &cli.Command{
}
}
fmt.Println("\n#: Retrieval Deals")
if err := retrievalDealsListCmd.Action(cctx); err != nil {
fmt.Println("ERROR: ", err)
}
fmt.Println("\n#: Data Transfers")
{
fs := &flag.FlagSet{}

View File

@ -189,7 +189,7 @@ func restore(ctx context.Context, cctx *cli.Context, targetPath string, strConfi
return
}
ff, err := config.FromFile(cf, rcfg)
ff, err := config.FromFile(cf, config.SetDefault(func() (interface{}, error) { return rcfg, nil }))
if err != nil {
cerr = xerrors.Errorf("loading config: %w", err)
return

View File

@ -24,7 +24,7 @@ import (
"golang.org/x/xerrors"
cborutil "github.com/filecoin-project/go-cbor-util"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"

View File

@ -3,13 +3,11 @@ package main
import (
"fmt"
"os"
"sort"
"text/tabwriter"
"github.com/docker/go-units"
"github.com/urfave/cli/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/types"
@ -21,7 +19,6 @@ var retrievalDealsCmd = &cli.Command{
Usage: "Manage retrieval deals and related configuration",
Subcommands: []*cli.Command{
retrievalDealSelectionCmd,
retrievalDealsListCmd,
retrievalSetAskCmd,
retrievalGetAskCmd,
},
@ -124,48 +121,6 @@ var retrievalDealSelectionRejectCmd = &cli.Command{
},
}
var retrievalDealsListCmd = &cli.Command{
Name: "list",
Usage: "List all active retrieval deals for this miner",
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetMarketsAPI(cctx)
if err != nil {
return err
}
defer closer()
deals, err := api.MarketListRetrievalDeals(lcli.DaemonContext(cctx))
if err != nil {
return err
}
sort.Slice(deals, func(i, j int) bool {
return deals[i].ID < deals[j].ID
})
w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0)
_, _ = fmt.Fprintf(w, "Receiver\tDealID\tPayload\tState\tPricePerByte\tBytesSent\tMessage\n")
for _, deal := range deals {
payloadCid := deal.PayloadCID.String()
_, _ = fmt.Fprintf(w,
"%s\t%d\t%s\t%s\t%s\t%d\t%s\n",
deal.Receiver.String(),
deal.ID,
"..."+payloadCid[len(payloadCid)-8:],
retrievalmarket.DealStatuses[deal.Status],
deal.PricePerByte.String(),
deal.TotalSent,
deal.Message,
)
}
return w.Flush()
},
}
var retrievalSetAskCmd = &cli.Command{
Name: "set-ask",
Usage: "Configure the provider's retrieval ask",

View File

@ -35,6 +35,7 @@ import (
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/gen/genesis"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@ -513,7 +514,7 @@ var chainBalanceStateCmd = &cli.Command{
cst := cbor.NewCborStore(bs)
store := adt.WrapStore(ctx, cst)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
if err != nil {
return err
}
@ -737,7 +738,7 @@ var chainPledgeCmd = &cli.Command{
cst := cbor.NewCborStore(bs)
store := adt.WrapStore(ctx, cst)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
if err != nil {
return err
}

View File

@ -20,6 +20,7 @@ import (
"github.com/filecoin-project/lotus/chain/beacon/drand"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -111,7 +112,7 @@ var gasTraceCmd = &cli.Command{
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex)
if err != nil {
return err
}
@ -212,7 +213,7 @@ var replayOfflineCmd = &cli.Command{
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex)
if err != nil {
return err
}

View File

@ -21,6 +21,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -90,7 +91,7 @@ var invariantsCmd = &cli.Command{
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
defer cs.Close() //nolint:errcheck
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
if err != nil {
return err
}

View File

@ -83,6 +83,7 @@ func main() {
invariantsCmd,
gasTraceCmd,
replayOfflineCmd,
msgindexCmd,
}
app := &cli.App{

View File

@ -41,6 +41,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@ -123,7 +124,7 @@ var migrationsCmd = &cli.Command{
defer cs.Close() //nolint:errcheck
// Note: we use a map datastore for the metadata to avoid writing / using cached migration results in the metadata store
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, datastore.NewMapDatastore())
sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, datastore.NewMapDatastore(), index.DummyMsgIndex)
if err != nil {
return err
}

cmd/lotus-shed/msgindex.go (new file, 221 lines)
View File

@ -0,0 +1,221 @@
package main
import (
"database/sql"
"fmt"
"path"
"github.com/ipfs/go-cid"
_ "github.com/mattn/go-sqlite3"
"github.com/mitchellh/go-homedir"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
lcli "github.com/filecoin-project/lotus/cli"
)
var msgindexCmd = &cli.Command{
Name: "msgindex",
Usage: "Tools for managing the message index",
Subcommands: []*cli.Command{
msgindexBackfillCmd,
msgindexPruneCmd,
},
}
var msgindexBackfillCmd = &cli.Command{
Name: "backfill",
Usage: "Backfill the message index for a number of epochs starting from a specified height",
Flags: []cli.Flag{
&cli.IntFlag{
Name: "from",
Value: 0,
Usage: "height to start the backfill; uses the current head if omitted",
},
&cli.IntFlag{
Name: "epochs",
Value: 1800,
Usage: "number of epochs to backfill; defaults to 1800 (2 finalities)",
},
&cli.StringFlag{
Name: "repo",
Value: "~/.lotus",
Usage: "path to the repo",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
curTs, err := api.ChainHead(ctx)
if err != nil {
return err
}
startHeight := int64(cctx.Int("from"))
if startHeight == 0 {
startHeight = int64(curTs.Height()) - 1
}
epochs := cctx.Int("epochs")
basePath, err := homedir.Expand(cctx.String("repo"))
if err != nil {
return err
}
dbPath := path.Join(basePath, "sqlite", "msgindex.db")
db, err := sql.Open("sqlite3", dbPath)
if err != nil {
return err
}
defer func() {
err := db.Close()
if err != nil {
fmt.Printf("ERROR: closing db: %s", err)
}
}()
tx, err := db.Begin()
if err != nil {
return err
}
insertStmt, err := tx.Prepare("INSERT INTO messages VALUES (?, ?, ?)")
if err != nil {
return err
}
insertMsg := func(cid, tsCid cid.Cid, epoch abi.ChainEpoch) error {
key := cid.String()
tskey := tsCid.String()
if _, err := insertStmt.Exec(key, tskey, int64(epoch)); err != nil {
return err
}
return nil
}
rollback := func() {
if err := tx.Rollback(); err != nil {
fmt.Printf("ERROR: rollback: %s", err)
}
}
for i := 0; i < epochs; i++ {
epoch := abi.ChainEpoch(startHeight - int64(i))
ts, err := api.ChainGetTipSetByHeight(ctx, epoch, curTs.Key())
if err != nil {
rollback()
return err
}
tsCid, err := ts.Key().Cid()
if err != nil {
rollback()
return err
}
msgs, err := api.ChainGetMessagesInTipset(ctx, ts.Key())
if err != nil {
rollback()
return err
}
for _, msg := range msgs {
if err := insertMsg(msg.Cid, tsCid, epoch); err != nil {
rollback()
return err
}
}
}
if err := tx.Commit(); err != nil {
return err
}
return nil
},
}
var msgindexPruneCmd = &cli.Command{
Name: "prune",
Usage: "Prune the message index for messages included before a given epoch",
Flags: []cli.Flag{
&cli.IntFlag{
Name: "from",
Usage: "height to start the prune; if negative it indicates epochs from current head",
},
&cli.StringFlag{
Name: "repo",
Value: "~/.lotus",
Usage: "path to the repo",
},
},
Action: func(cctx *cli.Context) error {
api, closer, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer closer()
ctx := lcli.ReqContext(cctx)
startHeight := int64(cctx.Int("from"))
if startHeight < 0 {
curTs, err := api.ChainHead(ctx)
if err != nil {
return err
}
startHeight += int64(curTs.Height())
if startHeight < 0 {
return xerrors.Errorf("bogus start height %d", startHeight)
}
}
basePath, err := homedir.Expand(cctx.String("repo"))
if err != nil {
return err
}
dbPath := path.Join(basePath, "sqlite", "msgindex.db")
db, err := sql.Open("sqlite3", dbPath)
if err != nil {
return err
}
defer func() {
err := db.Close()
if err != nil {
fmt.Printf("ERROR: closing db: %s", err)
}
}()
tx, err := db.Begin()
if err != nil {
return err
}
if _, err := tx.Exec("DELETE FROM messages WHERE epoch < ?", startHeight); err != nil {
if err := tx.Rollback(); err != nil {
fmt.Printf("ERROR: rollback: %s", err)
}
return err
}
if err := tx.Commit(); err != nil {
return err
}
return nil
},
}
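As a quick way to sanity-check what the new backfill command wrote (for instance after a hypothetical `lotus-shed msgindex backfill --from 2950000 --epochs 3600` run), the standalone sketch below counts index rows at or above a given epoch. It is illustrative only and assumes nothing beyond what the commands above already use: the sqlite file at `<repo>/sqlite/msgindex.db` and a `messages` table with an `epoch` column; the path and epoch literals are placeholders.

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

// countIndexedMessages reports how many index rows sit at or above the given
// epoch, e.g. to confirm a backfill run actually inserted data.
func countIndexedMessages(dbPath string, epoch int64) (int64, error) {
	db, err := sql.Open("sqlite3", dbPath)
	if err != nil {
		return 0, err
	}
	defer db.Close()

	var n int64
	err = db.QueryRow("SELECT COUNT(*) FROM messages WHERE epoch >= ?", epoch).Scan(&n)
	return n, err
}

func main() {
	// Placeholder path; the shed commands above derive it from --repo.
	n, err := countIndexedMessages("/home/user/.lotus/sqlite/msgindex.db", 2950000)
	if err != nil {
		panic(err)
	}
	fmt.Println("indexed messages at or above epoch:", n)
}
```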

View File

@ -9,7 +9,7 @@ import (
"sync"
"github.com/docker/go-units"
lru "github.com/hashicorp/golang-lru"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
offline "github.com/ipfs/go-ipfs-exchange-offline"
@ -26,6 +26,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -50,13 +51,13 @@ type fieldItem struct {
type cacheNodeGetter struct {
ds format.NodeGetter
cache *lru.TwoQueueCache
cache *lru.TwoQueueCache[cid.Cid, format.Node]
}
func newCacheNodeGetter(d format.NodeGetter, size int) (*cacheNodeGetter, error) {
cng := &cacheNodeGetter{ds: d}
cache, err := lru.New2Q(size)
cache, err := lru.New2Q[cid.Cid, format.Node](size)
if err != nil {
return nil, err
}
@ -68,7 +69,7 @@ func newCacheNodeGetter(d format.NodeGetter, size int) (*cacheNodeGetter, error)
func (cng *cacheNodeGetter) Get(ctx context.Context, c cid.Cid) (format.Node, error) {
if n, ok := cng.cache.Get(c); ok {
return n.(format.Node), nil
return n, nil
}
n, err := cng.ds.Get(ctx, c)
@ -308,7 +309,7 @@ to reduce the number of decode operations performed by caching the decoded objec
}
tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc)
sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
if err != nil {
return err
}
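For context on the cacheNodeGetter hunks in this file: hashicorp/golang-lru v2 parameterizes caches by key and value type, which is why the `n.(format.Node)` assertion disappears. A minimal standalone sketch of the v2 two-queue API with toy types (not Lotus code):

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// New2Q takes the key and value types, so Get returns a typed value and
	// no interface{} assertion is needed on the way out.
	cache, err := lru.New2Q[string, int](128)
	if err != nil {
		panic(err)
	}
	cache.Add("answer", 42)
	if v, ok := cache.Get("answer"); ok {
		fmt.Println(v) // prints 42
	}
}
```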

View File

@ -12,6 +12,7 @@ import (
"github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -106,7 +107,7 @@ func (nd *Node) LoadSim(ctx context.Context, name string) (*Simulation, error) {
if err != nil {
return nil, xerrors.Errorf("failed to create upgrade schedule for simulation %s: %w", name, err)
}
sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), us, nil, nd.MetadataDS)
sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), us, nil, nd.MetadataDS, index.DummyMsgIndex)
if err != nil {
return nil, xerrors.Errorf("failed to create state manager for simulation %s: %w", name, err)
}
@ -125,7 +126,7 @@ func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet)
if err != nil {
return nil, err
}
sm, err := stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil, nd.MetadataDS)
sm, err := stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil, nd.MetadataDS, index.DummyMsgIndex)
if err != nil {
return nil, xerrors.Errorf("creating state manager: %w", err)
}

View File

@ -17,6 +17,7 @@ import (
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
@ -201,7 +202,7 @@ func (sim *Simulation) SetUpgradeHeight(nv network.Version, epoch abi.ChainEpoch
if err != nil {
return err
}
sm, err := stmgr.NewStateManager(sim.Node.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil, sim.Node.MetadataDS)
sm, err := stmgr.NewStateManager(sim.Node.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil, sim.Node.MetadataDS, index.DummyMsgIndex)
if err != nil {
return err
}

View File

@ -66,7 +66,7 @@ func restore(cctx *cli.Context, r repo.Repo) error {
return
}
ff, err := config.FromFile(cf, rcfg)
ff, err := config.FromFile(cf, config.SetDefault(func() (interface{}, error) { return rcfg, nil }))
if err != nil {
cerr = xerrors.Errorf("loading config: %w", err)
return

View File

@ -31,7 +31,7 @@ var configDefaultCmd = &cli.Command{
Action: func(cctx *cli.Context) error {
c := config.DefaultFullNode()
cb, err := config.ConfigUpdate(c, nil, !cctx.Bool("no-comment"))
cb, err := config.ConfigUpdate(c, nil, config.Commented(!cctx.Bool("no-comment")), config.DefaultKeepUncommented())
if err != nil {
return err
}
@ -83,7 +83,7 @@ var configUpdateCmd = &cli.Command{
cfgDef := config.DefaultFullNode()
updated, err := config.ConfigUpdate(cfgNode, cfgDef, !cctx.Bool("no-comment"))
updated, err := config.ConfigUpdate(cfgNode, cfgDef, config.Commented(!cctx.Bool("no-comment")), config.DefaultKeepUncommented())
if err != nil {
return err
}
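Both config commands above move from a bare boolean argument to variadic options (`config.Commented`, `config.DefaultKeepUncommented`, and `config.SetDefault` in the restore path earlier). As a generic illustration of that functional-options style, with made-up names rather than the Lotus config package:

```go
package main

import "fmt"

// settings holds the knobs; an Option mutates it.
type settings struct {
	commented       bool
	keepUncommented bool
}

type Option func(*settings)

func Commented(v bool) Option { return func(s *settings) { s.commented = v } }

func KeepUncommented() Option { return func(s *settings) { s.keepUncommented = true } }

// render starts from zero-value defaults and applies whatever options the
// caller passed, so new knobs can be added without touching call sites.
func render(opts ...Option) settings {
	var s settings
	for _, o := range opts {
		o(&s)
	}
	return s
}

func main() {
	fmt.Printf("%+v\n", render(Commented(true), KeepUncommented()))
}
```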

View File

@ -35,6 +35,7 @@ import (
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
@ -540,7 +541,7 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool)
}
// TODO: We need to supply the actual beacon after v14
stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds)
stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex)
if err != nil {
return err
}

View File

@ -223,7 +223,7 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -712,7 +712,7 @@ func (t *SendReturn) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -812,7 +812,7 @@ func (t *MutateStateArgs) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -907,7 +907,7 @@ func (t *AbortWithArgs) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
@ -1064,7 +1064,7 @@ func (t *InspectRuntimeReturn) UnmarshalCBOR(r io.Reader) (err error) {
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative oveflow")
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:

View File

@ -22,6 +22,7 @@ import (
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/consensus"
"github.com/filecoin-project/lotus/chain/consensus/filcns"
"github.com/filecoin-project/lotus/chain/index"
"github.com/filecoin-project/lotus/chain/state"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
@ -108,7 +109,7 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params
cs = store.NewChainStore(bs, bs, ds, filcns.Weight, nil)
tse = consensus.NewTipSetExecutor(filcns.RewardFunc)
sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil, ds)
sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil, ds, index.DummyMsgIndex)
)
if err != nil {
return nil, err

View File

@ -1506,6 +1506,7 @@ Response:
```
### MarketListRetrievalDeals
MarketListRetrievalDeals is deprecated; it returns an empty list.
Perms: read
@ -1515,48 +1516,7 @@ Inputs: `null`
Response:
```json
[
{
"PayloadCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"ID": 5,
"Selector": {
"Raw": "Ynl0ZSBhcnJheQ=="
},
"PieceCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"PricePerByte": "0",
"PaymentInterval": 42,
"PaymentIntervalIncrease": 42,
"UnsealPrice": "0",
"StoreID": 42,
"ChannelID": {
"Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"ID": 3
},
"PieceInfo": {
"PieceCID": {
"/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
},
"Deals": [
{
"DealID": 5432,
"SectorID": 9,
"Offset": 1032,
"Length": 1032
}
]
},
"Status": 0,
"Receiver": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
"TotalSent": 42,
"FundsReceived": "0",
"Message": "string value",
"CurrentInterval": 42,
"LegacyProtocol": true
}
{}
]
```

View File

@ -91,9 +91,11 @@
* [EthGetTransactionByBlockHashAndIndex](#EthGetTransactionByBlockHashAndIndex)
* [EthGetTransactionByBlockNumberAndIndex](#EthGetTransactionByBlockNumberAndIndex)
* [EthGetTransactionByHash](#EthGetTransactionByHash)
* [EthGetTransactionByHashLimited](#EthGetTransactionByHashLimited)
* [EthGetTransactionCount](#EthGetTransactionCount)
* [EthGetTransactionHashByCid](#EthGetTransactionHashByCid)
* [EthGetTransactionReceipt](#EthGetTransactionReceipt)
* [EthGetTransactionReceiptLimited](#EthGetTransactionReceiptLimited)
* [EthMaxPriorityFeePerGas](#EthMaxPriorityFeePerGas)
* [EthNewBlockFilter](#EthNewBlockFilter)
* [EthNewFilter](#EthNewFilter)
@ -2810,6 +2812,45 @@ Response:
}
```
### EthGetTransactionByHashLimited
Perms: read
Inputs:
```json
[
"0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
10101
]
```
Response:
```json
{
"chainId": "0x5",
"nonce": "0x5",
"hash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
"blockHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
"blockNumber": "0x5",
"transactionIndex": "0x5",
"from": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
"to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
"value": "0x0",
"type": "0x5",
"input": "0x07",
"gas": "0x5",
"maxFeePerGas": "0x0",
"maxPriorityFeePerGas": "0x0",
"accessList": [
"0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e"
],
"v": "0x0",
"r": "0x0",
"s": "0x0"
}
```
### EthGetTransactionCount
@ -2888,6 +2929,54 @@ Response:
}
```
### EthGetTransactionReceiptLimited
Perms: read
Inputs:
```json
[
"0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
10101
]
```
Response:
```json
{
"transactionHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
"transactionIndex": "0x5",
"blockHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
"blockNumber": "0x5",
"from": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
"to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
"root": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
"status": "0x5",
"contractAddress": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
"cumulativeGasUsed": "0x5",
"gasUsed": "0x5",
"effectiveGasPrice": "0x0",
"logsBloom": "0x07",
"logs": [
{
"address": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031",
"data": "0x07",
"topics": [
"0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e"
],
"removed": true,
"logIndex": "0x5",
"transactionIndex": "0x5",
"transactionHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
"blockHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e",
"blockNumber": "0x5"
}
],
"type": "0x5"
}
```
### EthMaxPriorityFeePerGas

View File

@ -839,7 +839,6 @@ USAGE:
COMMANDS:
selection Configure acceptance criteria for retrieval deal proposals
list List all active retrieval deals for this miner
set-ask Configure the provider's retrieval ask
get-ask Get the provider's current retrieval ask configured by the provider in the ask-store using the set-ask CLI command
help, h Shows a list of commands or help for one command
@ -908,19 +907,6 @@ OPTIONS:
```
### lotus-miner retrieval-deals list
```
NAME:
lotus-miner retrieval-deals list - List all active retrieval deals for this miner
USAGE:
lotus-miner retrieval-deals list [command options] [arguments...]
OPTIONS:
--help, -h show help (default: false)
```
### lotus-miner retrieval-deals set-ask
```
NAME:

View File

@ -2647,6 +2647,7 @@ COMMANDS:
stat Print eth/filecoin addrs and code cid
call Simulate an eth contract call
contract-address Generate contract address from smart contract code
bytecode Write the bytecode of a smart contract to a file
help, h Shows a list of commands or help for one command
OPTIONS:
@ -2721,6 +2722,19 @@ OPTIONS:
```
### lotus evm bytecode
```
NAME:
lotus evm bytecode - Write the bytecode of a smart contract to a file
USAGE:
lotus evm bytecode [command options] [contract-address] [file-name]
OPTIONS:
--bin write the bytecode as raw binary and don't hex-encode (default: false)
```
## lotus net
```
NAME:

View File

@ -191,7 +191,7 @@
[Chainstore]
# type: bool
# env var: LOTUS_CHAINSTORE_ENABLESPLITSTORE
#EnableSplitstore = true
EnableSplitstore = true
[Chainstore.Splitstore]
# ColdStoreType specifies the type of the coldstore.
@ -390,3 +390,11 @@
#DatabasePath = ""
[Index]
# EnableMsgIndex enables indexing of messages on chain.
#
# type: bool
# env var: LOTUS_INDEX_ENABLEMSGINDEX
#EnableMsgIndex = false
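This new option is what the msgindex tooling earlier in the diff manages: the index is off by default, so an operator who wants on-chain message indexing would flip it in the node config (or via the environment variable shown above). A sketch of the resulting config fragment, mirroring the documented default with only the value changed:

```toml
[Index]
# Enable indexing of messages on chain (equivalent to LOTUS_INDEX_ENABLEMSGINDEX=true).
EnableMsgIndex = true
```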

extern/filecoin-ffi (vendored submodule)

@ -1 +1 @@
Subproject commit 88dc4c4ceb322337818b6508c8e9c23948f36cb1
Subproject commit 28e3cd44d91681c074aba362d1e5c954db110ad9

View File

@ -93,6 +93,7 @@ type TargetAPI interface {
StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error)
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (api.CirculatingSupply, error)
WalletBalance(context.Context, address.Address) (types.BigInt, error)
@ -103,11 +104,11 @@ type TargetAPI interface {
EthGetBlockTransactionCountByHash(ctx context.Context, blkHash ethtypes.EthHash) (ethtypes.EthUint64, error)
EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthHash, fullTxInfo bool) (ethtypes.EthBlock, error)
EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxInfo bool) (ethtypes.EthBlock, error)
EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error)
EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error)
EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error)
EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error)
EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkOpt string) (ethtypes.EthUint64, error)
EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*api.EthTxReceipt, error)
EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*api.EthTxReceipt, error)
EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error)
EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error)
EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkOpt string) (ethtypes.EthBytes, error)
@ -196,6 +197,9 @@ func (gw *Node) checkTipset(ts *types.TipSet) error {
}
func (gw *Node) checkTipsetHeight(ts *types.TipSet, h abi.ChainEpoch) error {
if h > ts.Height() {
return fmt.Errorf("tipset height in future")
}
tsBlock := ts.Blocks()[0]
heightDelta := time.Duration(uint64(tsBlock.Height-h)*build.BlockDelaySecs) * time.Second
timeAtHeight := time.Unix(int64(tsBlock.Timestamp), 0).Add(-heightDelta)

View File

@ -21,7 +21,7 @@ import (
)
func (gw *Node) Web3ClientVersion(ctx context.Context) (string, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
if err := gw.limit(ctx, basicRateLimitTokens); err != nil {
return "", err
}
@ -34,7 +34,7 @@ func (gw *Node) EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error)
}
func (gw *Node) EthBlockNumber(ctx context.Context) (ethtypes.EthUint64, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
if err := gw.limit(ctx, chainRateLimitTokens); err != nil {
return 0, err
}
@ -80,40 +80,40 @@ func (gw *Node) checkBlkHash(ctx context.Context, blkHash ethtypes.EthHash) erro
return gw.checkTipsetKey(ctx, tsk)
}
func (gw *Node) checkBlkParam(ctx context.Context, blkParam string) error {
func (gw *Node) checkBlkParam(ctx context.Context, blkParam string, lookback ethtypes.EthUint64) error {
if blkParam == "earliest" {
// also not supported in node impl
return fmt.Errorf("block param \"earliest\" is not supported")
}
switch blkParam {
case "pending", "latest":
// those will be recent enough, so we don't need to check
return nil
default:
var num ethtypes.EthUint64
err := num.UnmarshalJSON([]byte(`"` + blkParam + `"`))
if err != nil {
return fmt.Errorf("cannot parse block number: %v", err)
}
head, err := gw.target.ChainHead(ctx)
if err != nil {
return err
}
if err := gw.checkTipsetHeight(head, abi.ChainEpoch(num)); err != nil {
return err
}
head, err := gw.target.ChainHead(ctx)
if err != nil {
return err
}
return nil
var num ethtypes.EthUint64
switch blkParam {
case "pending", "latest":
// Head is always ok.
if lookback == 0 {
return nil
}
// Can't look beyond 0 anyways.
if lookback > ethtypes.EthUint64(head.Height()) {
break
}
num = ethtypes.EthUint64(head.Height()) - lookback
default:
if err := num.UnmarshalJSON([]byte(`"` + blkParam + `"`)); err != nil {
return fmt.Errorf("cannot parse block number: %v", err)
}
}
return gw.checkTipsetHeight(head, abi.ChainEpoch(num))
}
func (gw *Node) EthGetBlockTransactionCountByHash(ctx context.Context, blkHash ethtypes.EthHash) (ethtypes.EthUint64, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
return 0, err
}
if err := gw.checkBlkHash(ctx, blkHash); err != nil {
if err := gw.limit(ctx, chainRateLimitTokens); err != nil {
return 0, err
}
@ -137,7 +137,7 @@ func (gw *Node) EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxIn
return ethtypes.EthBlock{}, err
}
if err := gw.checkBlkParam(ctx, blkNum); err != nil {
if err := gw.checkBlkParam(ctx, blkNum, 0); err != nil {
return ethtypes.EthBlock{}, err
}
@ -145,11 +145,21 @@ func (gw *Node) EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxIn
}
func (gw *Node) EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error) {
return gw.target.EthGetTransactionByHashLimited(ctx, txHash, api.LookbackNoLimit)
}
func (gw *Node) EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
return nil, err
}
if limit == api.LookbackNoLimit {
limit = gw.stateWaitLookbackLimit
}
if gw.stateWaitLookbackLimit != api.LookbackNoLimit && limit > gw.stateWaitLookbackLimit {
limit = gw.stateWaitLookbackLimit
}
return gw.target.EthGetTransactionByHash(ctx, txHash)
return gw.target.EthGetTransactionByHashLimited(ctx, txHash, limit)
}
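The limit handling above (and the identical block in EthGetTransactionReceiptLimited below) boils down to one clamping rule. The helper below is a hypothetical restatement of that rule, not a function that exists in the gateway:

```go
package gateway

import "github.com/filecoin-project/go-state-types/abi"

// clampLookback restates the rule used above: an "unlimited" request falls
// back to the gateway's configured lookback, and any request larger than the
// configured lookback is reduced to it.
func clampLookback(requested, configured, noLimit abi.ChainEpoch) abi.ChainEpoch {
	if requested == noLimit {
		return configured
	}
	if configured != noLimit && requested > configured {
		return configured
	}
	return requested
}
```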
func (gw *Node) EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error) {
@ -161,7 +171,7 @@ func (gw *Node) EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*e
}
func (gw *Node) EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
if err := gw.limit(ctx, chainRateLimitTokens); err != nil {
return nil, err
}
@ -173,7 +183,7 @@ func (gw *Node) EthGetTransactionCount(ctx context.Context, sender ethtypes.EthA
return 0, err
}
if err := gw.checkBlkParam(ctx, blkOpt); err != nil {
if err := gw.checkBlkParam(ctx, blkOpt, 0); err != nil {
return 0, err
}
@ -181,39 +191,21 @@ func (gw *Node) EthGetTransactionCount(ctx context.Context, sender ethtypes.EthA
}
func (gw *Node) EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*api.EthTxReceipt, error) {
return gw.EthGetTransactionReceiptLimited(ctx, txHash, api.LookbackNoLimit)
}
func (gw *Node) EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*api.EthTxReceipt, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
return nil, err
}
return gw.target.EthGetTransactionReceipt(ctx, txHash)
}
func (gw *Node) EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
return ethtypes.EthTx{}, err
if limit == api.LookbackNoLimit {
limit = gw.stateWaitLookbackLimit
}
if gw.stateWaitLookbackLimit != api.LookbackNoLimit && limit > gw.stateWaitLookbackLimit {
limit = gw.stateWaitLookbackLimit
}
if err := gw.checkBlkHash(ctx, blkHash); err != nil {
return ethtypes.EthTx{}, err
}
return gw.target.EthGetTransactionByBlockHashAndIndex(ctx, blkHash, txIndex)
}
func (gw *Node) EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
return ethtypes.EthTx{}, err
}
head, err := gw.target.ChainHead(ctx)
if err != nil {
return ethtypes.EthTx{}, err
}
if err := gw.checkTipsetHeight(head, abi.ChainEpoch(blkNum)); err != nil {
return ethtypes.EthTx{}, err
}
return gw.target.EthGetTransactionByBlockNumberAndIndex(ctx, blkNum, txIndex)
return gw.target.EthGetTransactionReceiptLimited(ctx, txHash, limit)
}
func (gw *Node) EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkOpt string) (ethtypes.EthBytes, error) {
@ -221,7 +213,7 @@ func (gw *Node) EthGetCode(ctx context.Context, address ethtypes.EthAddress, blk
return nil, err
}
if err := gw.checkBlkParam(ctx, blkOpt); err != nil {
if err := gw.checkBlkParam(ctx, blkOpt, 0); err != nil {
return nil, err
}
@ -233,7 +225,7 @@ func (gw *Node) EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress
return nil, err
}
if err := gw.checkBlkParam(ctx, blkParam); err != nil {
if err := gw.checkBlkParam(ctx, blkParam, 0); err != nil {
return nil, err
}
@ -245,7 +237,7 @@ func (gw *Node) EthGetBalance(ctx context.Context, address ethtypes.EthAddress,
return ethtypes.EthBigInt(big.Zero()), err
}
if err := gw.checkBlkParam(ctx, blkParam); err != nil {
if err := gw.checkBlkParam(ctx, blkParam, 0); err != nil {
return ethtypes.EthBigInt(big.Zero()), err
}
@ -253,7 +245,7 @@ func (gw *Node) EthGetBalance(ctx context.Context, address ethtypes.EthAddress,
}
func (gw *Node) EthChainId(ctx context.Context) (ethtypes.EthUint64, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
if err := gw.limit(ctx, basicRateLimitTokens); err != nil {
return 0, err
}
@ -269,7 +261,7 @@ func (gw *Node) NetVersion(ctx context.Context) (string, error) {
}
func (gw *Node) NetListening(ctx context.Context) (bool, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
if err := gw.limit(ctx, basicRateLimitTokens); err != nil {
return false, err
}
@ -285,7 +277,7 @@ func (gw *Node) EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, err
}
func (gw *Node) EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
if err := gw.limit(ctx, chainRateLimitTokens); err != nil {
return ethtypes.EthBigInt(big.Zero()), err
}
@ -304,7 +296,7 @@ func (gw *Node) EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtype
return ethtypes.EthFeeHistory{}, err
}
if err := gw.checkBlkParam(ctx, params.NewestBlkNum); err != nil {
if err := gw.checkBlkParam(ctx, params.NewestBlkNum, params.BlkCount); err != nil {
return ethtypes.EthFeeHistory{}, err
}
@ -316,7 +308,7 @@ func (gw *Node) EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtype
}
func (gw *Node) EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
if err := gw.limit(ctx, chainRateLimitTokens); err != nil {
return ethtypes.EthBigInt(big.Zero()), err
}
@ -337,7 +329,7 @@ func (gw *Node) EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam strin
return nil, err
}
if err := gw.checkBlkParam(ctx, blkParam); err != nil {
if err := gw.checkBlkParam(ctx, blkParam, 0); err != nil {
return nil, err
}
@ -359,12 +351,12 @@ func (gw *Node) EthGetLogs(ctx context.Context, filter *ethtypes.EthFilterSpec)
}
if filter.FromBlock != nil {
if err := gw.checkBlkParam(ctx, *filter.FromBlock); err != nil {
if err := gw.checkBlkParam(ctx, *filter.FromBlock, 0); err != nil {
return nil, err
}
}
if filter.ToBlock != nil {
if err := gw.checkBlkParam(ctx, *filter.ToBlock); err != nil {
if err := gw.checkBlkParam(ctx, *filter.ToBlock, 0); err != nil {
return nil, err
}
}

View File

@ -247,9 +247,6 @@ func (gw *Node) MsigGetVested(ctx context.Context, addr address.Address, start t
if err := gw.limit(ctx, walletRateLimitTokens); err != nil {
return types.BigInt{}, err
}
if err := gw.checkTipsetKey(ctx, start); err != nil {
return types.NewInt(0), err
}
if err := gw.checkTipsetKey(ctx, end); err != nil {
return types.NewInt(0), err
}
@ -522,6 +519,16 @@ func (gw *Node) StateVerifiedClientStatus(ctx context.Context, addr address.Addr
return gw.target.StateVerifiedClientStatus(ctx, addr, tsk)
}
func (gw *Node) StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
return nil, err
}
if err := gw.checkTipsetKey(ctx, tsk); err != nil {
return nil, err
}
return gw.target.StateVerifierStatus(ctx, addr, tsk)
}
func (gw *Node) StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) {
if err := gw.limit(ctx, stateRateLimitTokens); err != nil {
return api.CirculatingSupply{}, err

go.mod (184 lines changed)
View File

@ -4,6 +4,8 @@ go 1.18
retract v1.14.0 // Accidentally force-pushed tag, use v1.14.1+ instead.
retract v1.20.2 // Wrongfully cherry picked PR, use v1.20.3+ instead.
require (
contrib.go.opencensus.io/exporter/prometheus v0.4.0
github.com/BurntSushi/toml v1.1.0
@ -14,14 +16,14 @@ require (
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921
github.com/buger/goterm v1.0.3
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e
github.com/chzyer/readline v1.5.0
github.com/containerd/cgroups v1.0.4
github.com/coreos/go-systemd/v22 v22.4.0
github.com/coreos/go-systemd/v22 v22.5.0
github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e
github.com/dgraph-io/badger/v2 v2.2007.3
github.com/dgraph-io/badger/v2 v2.2007.4
github.com/docker/go-units v0.5.0
github.com/drand/drand v1.3.0
github.com/drand/kyber v1.1.7
github.com/drand/drand v1.4.9
github.com/drand/kyber v1.1.15
github.com/dustin/go-humanize v1.0.0
github.com/elastic/go-elasticsearch/v7 v7.14.0
github.com/elastic/go-sysinfo v1.7.0
@ -36,16 +38,15 @@ require (
github.com/filecoin-project/go-cbor-util v0.0.1
github.com/filecoin-project/go-commp-utils v0.1.3
github.com/filecoin-project/go-crypto v0.0.1
github.com/filecoin-project/go-data-transfer v1.15.2
github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc4
github.com/filecoin-project/go-fil-commcid v0.1.0
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0
github.com/filecoin-project/go-fil-markets v1.26.0
github.com/filecoin-project/go-jsonrpc v0.2.2
github.com/filecoin-project/go-legs v0.4.4
github.com/filecoin-project/go-fil-markets v1.27.0-rc1
github.com/filecoin-project/go-jsonrpc v0.2.3
github.com/filecoin-project/go-padreader v0.0.1
github.com/filecoin-project/go-paramfetch v0.0.4
github.com/filecoin-project/go-state-types v0.10.0
github.com/filecoin-project/go-statemachine v1.0.2
github.com/filecoin-project/go-statemachine v1.0.3
github.com/filecoin-project/go-statestore v0.2.0
github.com/filecoin-project/go-storedcounter v0.1.0
github.com/filecoin-project/pubsub v1.0.0
@ -60,7 +61,6 @@ require (
github.com/filecoin-project/test-vectors/schema v0.0.5
github.com/gbrlsnchs/jwt/v3 v3.0.1
github.com/gdamore/tcell/v2 v2.2.0
github.com/go-kit/kit v0.12.0
github.com/go-openapi/spec v0.19.11
github.com/golang/mock v1.6.0
github.com/google/uuid v1.3.0
@ -69,62 +69,63 @@ require (
github.com/gregdhill/go-openrpc v0.0.0-20220114144539-ae6f44720487
github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026
github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e
github.com/hashicorp/go-hclog v1.3.0
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru v0.5.4
github.com/hashicorp/raft v1.1.1
github.com/hashicorp/golang-lru/v2 v2.0.2
github.com/hashicorp/raft v1.3.10
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea
github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab
github.com/ipfs/bbloom v0.0.4
github.com/ipfs/go-blockservice v0.4.0
github.com/ipfs/go-cid v0.3.2
github.com/ipfs/go-blockservice v0.5.0
github.com/ipfs/go-cid v0.4.0
github.com/ipfs/go-cidutil v0.1.0
github.com/ipfs/go-datastore v0.6.0
github.com/ipfs/go-ds-badger2 v0.1.2
github.com/ipfs/go-ds-badger2 v0.1.3
github.com/ipfs/go-ds-leveldb v0.5.0
github.com/ipfs/go-ds-measure v0.2.0
github.com/ipfs/go-fs-lock v0.0.7
github.com/ipfs/go-graphsync v0.13.2
github.com/ipfs/go-graphsync v0.14.3
github.com/ipfs/go-ipfs-blockstore v1.2.0
github.com/ipfs/go-ipfs-blocksutil v0.0.1
github.com/ipfs/go-ipfs-chunker v0.0.5
github.com/ipfs/go-ipfs-ds-help v1.1.0
github.com/ipfs/go-ipfs-exchange-interface v0.2.0
github.com/ipfs/go-ipfs-exchange-offline v0.3.0
github.com/ipfs/go-ipfs-files v0.1.1
github.com/ipfs/go-ipfs-http-client v0.4.0
github.com/ipfs/go-ipfs-http-client v0.5.0
github.com/ipfs/go-ipfs-routing v0.3.0
github.com/ipfs/go-ipfs-util v0.0.2
github.com/ipfs/go-ipld-cbor v0.0.6
github.com/ipfs/go-ipld-format v0.4.0
github.com/ipfs/go-libipfs v0.4.1
github.com/ipfs/go-libipfs v0.7.0
github.com/ipfs/go-log/v2 v2.5.1
github.com/ipfs/go-merkledag v0.8.1
github.com/ipfs/go-merkledag v0.9.0
github.com/ipfs/go-metrics-interface v0.0.1
github.com/ipfs/go-metrics-prometheus v0.0.2
github.com/ipfs/go-unixfs v0.4.0
github.com/ipfs/go-unixfsnode v1.4.0
github.com/ipfs/interface-go-ipfs-core v0.7.0
github.com/ipld/go-car v0.4.0
github.com/ipld/go-car/v2 v2.5.0
github.com/ipfs/go-unixfs v0.4.3
github.com/ipfs/go-unixfsnode v1.5.2
github.com/ipfs/interface-go-ipfs-core v0.11.1
github.com/ipld/go-car v0.5.0
github.com/ipld/go-car/v2 v2.7.0
github.com/ipld/go-codec-dagpb v1.5.0
github.com/ipld/go-ipld-prime v0.20.0
github.com/ipld/go-ipld-selector-text-lite v0.0.1
github.com/ipni/index-provider v0.10.0
github.com/ipni/index-provider v0.11.0
github.com/ipni/storetheindex v0.5.10
github.com/kelseyhightower/envconfig v1.4.0
github.com/koalacxr/quantile v0.0.1
github.com/libp2p/go-buffer-pool v0.1.0
github.com/libp2p/go-libp2p v0.23.4
github.com/libp2p/go-libp2p v0.26.2
github.com/libp2p/go-libp2p-consensus v0.0.1
github.com/libp2p/go-libp2p-gorpc v0.4.0
github.com/libp2p/go-libp2p-kad-dht v0.18.0
github.com/libp2p/go-libp2p-pubsub v0.8.2
github.com/libp2p/go-libp2p-raft v0.1.8
github.com/libp2p/go-libp2p-gorpc v0.5.0
github.com/libp2p/go-libp2p-kad-dht v0.21.0
github.com/libp2p/go-libp2p-pubsub v0.9.3
github.com/libp2p/go-libp2p-raft v0.4.0
github.com/libp2p/go-libp2p-record v0.2.0
github.com/libp2p/go-libp2p-routing-helpers v0.2.3
github.com/libp2p/go-libp2p-routing-helpers v0.4.0
github.com/libp2p/go-maddr-filter v0.1.0
github.com/libp2p/go-msgio v0.2.0
github.com/mattn/go-isatty v0.0.16
github.com/libp2p/go-msgio v0.3.0
github.com/mattn/go-isatty v0.0.17
github.com/mattn/go-sqlite3 v1.14.16
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1
github.com/mitchellh/go-homedir v1.1.0
@ -133,37 +134,38 @@ require (
github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/multiformats/go-multibase v0.1.1
github.com/multiformats/go-multihash v0.2.1
github.com/multiformats/go-varint v0.0.6
github.com/multiformats/go-varint v0.0.7
github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e
github.com/prometheus/client_golang v1.13.0
github.com/polydawn/refmt v0.89.0
github.com/prometheus/client_golang v1.14.0
github.com/raulk/clock v1.1.0
github.com/raulk/go-watchdog v1.3.0
github.com/stretchr/testify v1.8.1
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/urfave/cli/v2 v2.16.3
github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba
github.com/whyrusleeping/cbor-gen v0.0.0-20221021053955-c138aae13722
github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa
github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4
github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7
github.com/xeipuuv/gojsonschema v1.2.0
github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542
go.opencensus.io v0.23.0
go.opentelemetry.io/otel v1.11.1
github.com/zyedidia/generic v1.2.1
go.opencensus.io v0.24.0
go.opentelemetry.io/otel v1.12.0
go.opentelemetry.io/otel/bridge/opencensus v0.33.0
go.opentelemetry.io/otel/exporters/jaeger v1.2.0
go.opentelemetry.io/otel/sdk v1.11.1
go.uber.org/atomic v1.10.0
go.uber.org/fx v1.15.0
go.uber.org/multierr v1.8.0
go.uber.org/zap v1.23.0
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90
golang.org/x/exp v0.0.0-20220916125017-b168a2c6b86b
golang.org/x/net v0.0.0-20220920183852-bf014ff85ad5
golang.org/x/sync v0.0.0-20220907140024-f12130a52804
golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8
go.uber.org/fx v1.18.2
go.uber.org/multierr v1.9.0
go.uber.org/zap v1.24.0
golang.org/x/crypto v0.5.0
golang.org/x/exp v0.0.0-20230129154200-a960b3787bd2
golang.org/x/net v0.7.0
golang.org/x/sync v0.1.0
golang.org/x/sys v0.5.0
golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9
golang.org/x/tools v0.1.12
golang.org/x/tools v0.3.0
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2
gopkg.in/cheggaaa/pb.v1 v1.0.28
gotest.tools v2.2.0+incompatible
@ -174,7 +176,6 @@ require (
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/Stebalien/go-bitfield v0.0.1 // indirect
github.com/akavel/rsrc v0.8.0 // indirect
github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a // indirect
github.com/armon/go-metrics v0.3.9 // indirect
@ -183,7 +184,7 @@ require (
github.com/bep/debounce v1.2.1 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cilium/ebpf v0.4.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect
@ -193,8 +194,8 @@ require (
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
github.com/dgraph-io/ristretto v0.1.0 // indirect
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
github.com/drand/kyber-bls12381 v0.2.1 // indirect
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
github.com/drand/kyber-bls12381 v0.2.3 // indirect
github.com/elastic/go-windows v1.0.0 // indirect
github.com/etclabscore/go-jsonschema-walk v0.0.6 // indirect
github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 // indirect
@ -206,9 +207,8 @@ require (
github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 // indirect
github.com/flynn/noise v1.0.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/gdamore/encoding v1.0.0 // indirect
github.com/go-kit/log v0.2.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@ -225,29 +225,29 @@ require (
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 // indirect
github.com/google/pprof v0.0.0-20221203041831-ce31453925ec // indirect
github.com/hannahhoward/cbor-gen-for v0.0.0-20230214144701-5d17c9d5243c // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-hclog v0.16.2 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-msgpack v0.5.5 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/huin/goupnp v1.0.3 // indirect
github.com/iancoleman/orderedmap v0.1.0 // indirect
github.com/ipfs/go-bitfield v1.0.0 // indirect
github.com/ipfs/go-bitfield v1.1.0 // indirect
github.com/ipfs/go-block-format v0.1.1 // indirect
github.com/ipfs/go-filestore v1.2.0 // indirect
github.com/ipfs/go-ipfs-cmds v0.7.0 // indirect
github.com/ipfs/go-ipfs-config v0.18.0 // indirect
github.com/ipfs/go-ipfs-cmds v0.8.2 // indirect
github.com/ipfs/go-ipfs-delay v0.0.1 // indirect
github.com/ipfs/go-ipfs-files v0.3.0 // indirect
github.com/ipfs/go-ipfs-posinfo v0.0.1 // indirect
github.com/ipfs/go-ipfs-pq v0.0.3 // indirect
github.com/ipfs/go-ipld-legacy v0.1.1 // indirect
github.com/ipfs/go-ipns v0.3.0 // indirect
github.com/ipfs/go-log v1.0.5 // indirect
github.com/ipfs/go-path v0.3.0 // indirect
github.com/ipfs/go-path v0.3.1 // indirect
github.com/ipfs/go-peertaskqueue v0.8.1 // indirect
github.com/ipfs/go-verifcid v0.0.2 // indirect
github.com/ipld/go-ipld-adl-hamt v0.0.0-20220616142416-9004dbd839e0 // indirect
github.com/ipni/storetheindex v0.5.3-0.20221203123030-16745cb63f15 // indirect
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c // indirect
@ -255,78 +255,68 @@ require (
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/jessevdk/go-flags v1.4.0 // indirect
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect
github.com/jonboulle/clockwork v0.2.2 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391 // indirect
github.com/klauspost/compress v1.15.10 // indirect
github.com/klauspost/cpuid/v2 v2.1.1 // indirect
github.com/kilic/bls12-381 v0.1.0 // indirect
github.com/klauspost/compress v1.15.12 // indirect
github.com/klauspost/cpuid/v2 v2.2.3 // indirect
github.com/koron/go-ssdp v0.0.3 // indirect
github.com/libp2p/go-cidranger v1.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect
github.com/libp2p/go-libp2p-connmgr v0.4.0 // indirect
github.com/libp2p/go-libp2p-core v0.20.1 // indirect
github.com/libp2p/go-libp2p-gostream v0.5.0 // indirect
github.com/libp2p/go-libp2p-gostream v0.6.0 // indirect
github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect
github.com/libp2p/go-libp2p-noise v0.5.0 // indirect
github.com/libp2p/go-libp2p-peerstore v0.8.0 // indirect
github.com/libp2p/go-libp2p-tls v0.5.0 // indirect
github.com/libp2p/go-nat v0.1.0 // indirect
github.com/libp2p/go-netroute v0.2.0 // indirect
github.com/libp2p/go-openssl v0.1.0 // indirect
github.com/libp2p/go-netroute v0.2.1 // indirect
github.com/libp2p/go-reuseport v0.2.0 // indirect
github.com/libp2p/go-yamux/v4 v4.0.0 // indirect
github.com/lucas-clemente/quic-go v0.29.1 // indirect
github.com/lucasb-eyer/go-colorful v1.0.3 // indirect
github.com/magefile/mage v1.9.0 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect
github.com/marten-seemann/qtls-go1-19 v0.1.0 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-colorable v0.1.9 // indirect
github.com/mattn/go-pointer v0.0.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.10 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/miekg/dns v1.1.50 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base36 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multicodec v0.8.0 // indirect
github.com/multiformats/go-multistream v0.3.3 // indirect
github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c // indirect
github.com/multiformats/go-multistream v0.4.1 // indirect
github.com/nikkolasg/hexjson v0.1.0 // indirect
github.com/nkovacs/streamquote v1.0.0 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/onsi/ginkgo/v2 v2.5.1 // indirect
github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/prometheus/statsd_exporter v0.21.0 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-19 v0.2.1 // indirect
github.com/quic-go/qtls-go1-20 v0.1.1 // indirect
github.com/quic-go/quic-go v0.33.0 // indirect
github.com/quic-go/webtransport-go v0.5.2 // indirect
github.com/rivo/uniseg v0.1.0 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shirou/gopsutil v2.18.12+incompatible // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/twmb/murmur3 v1.1.6 // indirect
github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect
github.com/ugorji/go/codec v1.2.6 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.0.1 // indirect
github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
@ -334,20 +324,20 @@ require (
github.com/zondax/ledger-go v0.12.1 // indirect
go.opentelemetry.io/otel/metric v0.33.0 // indirect
go.opentelemetry.io/otel/sdk/metric v0.33.0 // indirect
go.opentelemetry.io/otel/trace v1.11.1 // indirect
go.uber.org/dig v1.12.0 // indirect
go.opentelemetry.io/otel/trace v1.12.0 // indirect
go.uber.org/dig v1.15.0 // indirect
go4.org v0.0.0-20200411211856-f5505b9728dd // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/term v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 // indirect
google.golang.org/grpc v1.45.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
lukechampine.com/blake3 v1.1.7 // indirect
nhooyr.io/websocket v1.8.7 // indirect
)
replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi

go.sum (724 lines changed)

File diff suppressed because it is too large.

View File

@ -11,7 +11,7 @@ import (
provider "github.com/ipni/index-provider"
"github.com/stretchr/testify/require"
datatransfer "github.com/filecoin-project/go-data-transfer"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/shared_testutil"
"github.com/filecoin-project/go-state-types/abi"

View File

@ -2,6 +2,7 @@ package itests
import (
"context"
"strconv"
"testing"
"time"
@ -9,6 +10,7 @@ import (
"github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes"
"github.com/filecoin-project/lotus/chain/wallet/key"
@ -109,3 +111,16 @@ func TestEthGetGenesis(t *testing.T) {
require.NoError(t, err)
require.Equal(t, ethBlk.Hash, genesisHash)
}
func TestNetVersion(t *testing.T) {
blockTime := 100 * time.Millisecond
client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
ens.InterconnectAll().BeginMining(blockTime)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
version, err := client.NetVersion(ctx)
require.NoError(t, err)
require.Equal(t, strconv.Itoa(build.Eip155ChainId), version)
}

View File

@ -49,9 +49,6 @@ func TestEthBlockHashesCorrect_MultiBlockTipset(t *testing.T) {
// let the chain run a little bit longer to minimise the chance of reorgs
n2.WaitTillChain(ctx, kit.HeightAtLeast(head.Height()+50))
head, err = n2.ChainHead(context.Background())
require.NoError(t, err)
for i := 1; i <= int(head.Height()); i++ {
hex := fmt.Sprintf("0x%x", i)

View File

@ -236,14 +236,6 @@ func TestEthOpenRPCConformance(t *testing.T) {
skipReason: "earliest block is not supported",
},
{
method: "eth_getBlockByNumber",
variant: "pending",
call: func(a *ethAPIRaw) (json.RawMessage, error) {
return ethapi.EthGetBlockByNumber(context.Background(), "pending", true)
},
},
{
method: "eth_getBlockByNumber",
call: func(a *ethAPIRaw) (json.RawMessage, error) {

View File

@ -192,12 +192,15 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur
reportSuccessFn := func(success bool, epoch abi.ChainEpoch, err error) {
// if api shuts down before mining, we may get an error which we should probably just ignore
// (fixing it will require rewriting most of the mining loop)
if err != nil && !strings.Contains(err.Error(), "websocket connection closed") && !api.ErrorIsIn(err, []error{new(jsonrpc.RPCConnectionError)}) {
if err != nil && ctx.Err() == nil && !strings.Contains(err.Error(), "websocket connection closed") && !api.ErrorIsIn(err, []error{new(jsonrpc.RPCConnectionError)}) {
require.NoError(bm.t, err)
}
target = epoch
wait <- success
select {
case wait <- success:
case <-ctx.Done():
}
}
var success bool
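The fix above does two things: it ignores errors once the context is cancelled, and it wraps the result send in a select so the reporting callback cannot block forever if nobody is reading `wait` anymore. The same send pattern in isolation (standalone sketch, not kit code):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// reportResult delivers a result unless the context is cancelled first, so a
// reporting goroutine can never hang on an abandoned, unbuffered channel.
func reportResult(ctx context.Context, results chan<- bool, ok bool) {
	select {
	case results <- ok:
	case <-ctx.Done():
		// Receiver is gone or the run was cancelled; drop the result.
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	results := make(chan bool) // unbuffered: a bare send would block with no reader
	cancel()                   // simulate shutdown before anyone reads

	done := make(chan struct{})
	go func() {
		reportResult(ctx, results, true)
		close(done)
	}()

	select {
	case <-done:
		fmt.Println("returned promptly despite no receiver")
	case <-time.After(time.Second):
		fmt.Println("blocked") // this is what an unconditional send would do
	}
}
```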

View File

@ -10,8 +10,8 @@ import (
"time"
"github.com/ipfs/go-cid"
files "github.com/ipfs/go-ipfs-files"
ipld "github.com/ipfs/go-ipld-format"
"github.com/ipfs/go-libipfs/files"
dag "github.com/ipfs/go-merkledag"
dstest "github.com/ipfs/go-merkledag/test"
unixfile "github.com/ipfs/go-unixfs/file"

View File

@ -17,8 +17,8 @@ import (
bstore "github.com/ipfs/go-ipfs-blockstore"
chunk "github.com/ipfs/go-ipfs-chunker"
offline "github.com/ipfs/go-ipfs-exchange-offline"
files "github.com/ipfs/go-ipfs-files"
ipldformat "github.com/ipfs/go-ipld-format"
"github.com/ipfs/go-libipfs/files"
"github.com/ipfs/go-merkledag"
"github.com/ipfs/go-unixfs/importer/balanced"
ihelper "github.com/ipfs/go-unixfs/importer/helpers"

Some files were not shown because too many files have changed in this diff.