package api

import (
"context"
"encoding/json"
"fmt"
"time"

"github.com/google/uuid"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p/core/peer"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/builtin/v8/paych"
"github.com/filecoin-project/go-state-types/builtin/v9/market"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
abinetwork "github.com/filecoin-project/go-state-types/network"

apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo/imports"
)

//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_full.go -package=mocks . FullNode
// ChainIO abstracts operations for accessing raw IPLD objects.
type ChainIO interface {
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
ChainHasObj(context.Context, cid.Cid) (bool, error)
ChainPutObj(context.Context, blocks.Block) error
}
const LookbackNoLimit = abi.ChainEpoch(-1)
// MODIFYING THE API INTERFACE
//
// NOTE: This is the V1 (Unstable) API - to add methods to the V0 (Stable) API
// you'll have to add those methods to interfaces in `api/v0api`
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
// FullNode API is a low-level interface to the Filecoin network full node
type FullNode interface {
Common
Net
// MethodGroup: Chain
// The Chain method group contains methods for interacting with the
// blockchain, but that do not require any form of state computation.
// ChainNotify returns channel with chain head updates.
// First message is guaranteed to be of len == 1, and type == 'current'.
ChainNotify(context.Context) (<-chan []*HeadChange, error) //perm:read
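
// Illustrative usage (a sketch, not part of the interface; `api` is assumed to
// be a connected FullNode client and `ctx` a context.Context). The first update
// on the channel carries the current head:
//
//	updates, err := api.ChainNotify(ctx)
//	if err != nil {
//		return err
//	}
//	current := <-updates // len == 1, Type == "current"
//	fmt.Println("head:", current[0].Val.Height())
//	for change := range updates {
//		for _, hc := range change {
//			fmt.Println(hc.Type, hc.Val.Height()) // "apply" or "revert"
//		}
//	}
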
// ChainHead returns the current head of the chain.
ChainHead(context.Context) (*types.TipSet, error) //perm:read
// ChainGetBlock returns the block specified by the given CID.
ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) //perm:read
// ChainGetTipSet returns the tipset specified by the given TipSetKey.
ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) //perm:read
// ChainGetBlockMessages returns messages stored in the specified block.
//
// Note: If there are multiple blocks in a tipset, it's likely that some
// messages will be duplicated. It's also possible for blocks in a tipset to have
// different messages from the same sender at the same nonce. When that happens,
// only the first message (in a block with lowest ticket) will be considered
// for execution
//
// NOTE: THIS METHOD SHOULD ONLY BE USED FOR GETTING MESSAGES IN A SPECIFIC BLOCK
//
// DO NOT USE THIS METHOD TO GET MESSAGES INCLUDED IN A TIPSET
// Use ChainGetParentMessages, which will perform correct message deduplication
ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*BlockMessages, error) //perm:read
// ChainGetParentReceipts returns receipts for messages in parent tipset of
// the specified block. The receipts in the returned list correspond one-to-one with the
// messages returned by a call to ChainGetParentMessages with the same blockCid.
ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) //perm:read
// ChainGetParentMessages returns messages stored in parent tipset of the
// specified block.
ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error) //perm:read
// ChainGetMessagesInTipset returns messages stored in the specified tipset
ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]Message, error) //perm:read
// ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
// If there are no blocks at the specified epoch, a tipset at an earlier epoch
// will be returned.
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) //perm:read
// ChainGetTipSetAfterHeight looks back for a tipset at the specified epoch.
// If there are no blocks at the specified epoch, the first non-nil tipset at a later epoch
// will be returned.
ChainGetTipSetAfterHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) //perm:read
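
// Illustrative usage (a sketch; `api` and `ctx` are assumed as above). Passing
// types.EmptyTSK anchors the lookback at the current chain head:
//
//	ts, err := api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(100000), types.EmptyTSK)
//	if err != nil {
//		return err
//	}
//	fmt.Println(ts.Height()) // may be lower than 100000 if that epoch had no blocks
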
// ChainReadObj reads ipld nodes referenced by the specified CID from chain
// blockstore and returns raw bytes.
ChainReadObj(context.Context, cid.Cid) ([]byte, error) //perm:read
// ChainDeleteObj deletes node referenced by the given CID
ChainDeleteObj(context.Context, cid.Cid) error //perm:admin
// ChainHasObj checks if a given CID exists in the chain blockstore.
ChainHasObj(context.Context, cid.Cid) (bool, error) //perm:read
// ChainPutObj puts a given object into the block store
ChainPutObj(context.Context, blocks.Block) error //perm:admin
// ChainStatObj returns statistics about the graph referenced by 'obj'.
// If 'base' is also specified, then the returned stat will be a diff
// between the two objects.
ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (ObjStat, error) //perm:read
// ChainSetHead forcefully sets current chain head. Use with caution.
ChainSetHead(context.Context, types.TipSetKey) error //perm:admin
// ChainGetGenesis returns the genesis tipset.
ChainGetGenesis(context.Context) (*types.TipSet, error) //perm:read
// ChainTipSetWeight computes weight for the specified tipset.
ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error) //perm:read
ChainGetNode(ctx context.Context, p string) (*IpldObject, error) //perm:read
// ChainGetMessage reads a message referenced by the specified CID from the
// chain blockstore.
ChainGetMessage(context.Context, cid.Cid) (*types.Message, error) //perm:read
// ChainGetPath returns a set of revert/apply operations needed to get from
// one tipset to another, for example:
// ```
//        to
//         ^
// from   tAA
//   ^     ^
// tBA    tAB
//  ^---*--^
//      ^
//     tRR
// ```
// Would return `[revert(tBA), apply(tAB), apply(tAA)]`
ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error) //perm:read
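
// Illustrative usage (a sketch; `from` and `to` are assumed to be *types.TipSet
// values already held by the caller):
//
//	path, err := api.ChainGetPath(ctx, from.Key(), to.Key())
//	if err != nil {
//		return err
//	}
//	for _, hc := range path {
//		fmt.Println(hc.Type, hc.Val.Height()) // "revert" entries first, then "apply"
//	}
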
// ChainExport returns a stream of bytes with CAR dump of chain data.
// The exported chain data includes the header chain from the given tipset
// back to genesis, the entire genesis state, and the most recent 'nroots'
// state trees.
// If oldmsgskip is set, messages from before the requested roots are also not included.
ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) //perm:read
// ChainExportRangeInternal triggers the export of a chain
// CAR-snapshot directly to disk. It is similar to ChainExport,
// except, depending on options, the snapshot can include receipts,
// messages and stateroots for the length between the specified head
// and tail, thus producing "archival-grade" snapshots that include
// all the on-chain data. The header chain is included back to
// genesis and these snapshots can be used to initialize Filecoin
// nodes.
ChainExportRangeInternal(ctx context.Context, head, tail types.TipSetKey, cfg ChainExportConfig) error //perm:admin
// ChainPrune forces compaction on cold store and garbage collects; only supported if you
// are using the splitstore
ChainPrune(ctx context.Context, opts PruneOpts) error //perm:admin
// ChainHotGC does online (badger) GC on the hot store; only supported if you are using
// the splitstore
ChainHotGC(ctx context.Context, opts HotGCOpts) error //perm:admin
// ChainCheckBlockstore performs an (asynchronous) health check on the chain/state blockstore
// if supported by the underlying implementation.
ChainCheckBlockstore(context.Context) error //perm:admin
// ChainBlockstoreInfo returns some basic information about the blockstore
ChainBlockstoreInfo(context.Context) (map[string]interface{}, error) //perm:read
// ChainGetEvents returns the events under an event AMT root CID.
ChainGetEvents(context.Context, cid.Cid) ([]types.Event, error) //perm:read
// GasEstimateFeeCap estimates gas fee cap
GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) //perm:read
// GasEstimateGasLimit estimates gas used by the message and returns it.
// It fails if the message fails to execute.
GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error) //perm:read
// GasEstimateGasPremium estimates what gas price should be used for a
// message to have high likelihood of inclusion in `nblocksincl` epochs.
GasEstimateGasPremium(_ context.Context, nblocksincl uint64,
sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) //perm:read
// GasEstimateMessageGas estimates gas values for unset message gas fields
GasEstimateMessageGas(context.Context, *types.Message, *MessageSendSpec, types.TipSetKey) (*types.Message, error) //perm:read
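
// Illustrative usage (a sketch; `msg` is assumed to be a *types.Message with its
// gas fields left at zero):
//
//	estimated, err := api.GasEstimateMessageGas(ctx, msg, nil, types.EmptyTSK)
//	if err != nil {
//		return err
//	}
//	fmt.Println(estimated.GasLimit, estimated.GasFeeCap, estimated.GasPremium)
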
// MethodGroup: Sync
// The Sync method group contains methods for interacting with and
// observing the lotus sync service.
// SyncState returns the current status of the lotus sync system.
SyncState(context.Context) (*SyncState, error) //perm:read
// SyncSubmitBlock can be used to submit a newly created block to the
// network through this node.
SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error //perm:write
// SyncIncomingBlocks returns a channel streaming incoming, potentially not
// yet synced block headers.
SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) //perm:read
// SyncCheckpoint marks a block as checkpointed, meaning that it won't ever fork away from it.
SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error //perm:admin
// SyncMarkBad marks a block as bad, meaning that it won't ever be synced.
// Use with extreme caution.
SyncMarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin
// SyncUnmarkBad unmarks a block as bad, making it possible to be validated and synced again.
SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin
// SyncUnmarkAllBad purges bad block cache, making it possible to sync to chains previously marked as bad
SyncUnmarkAllBad(ctx context.Context) error //perm:admin
// SyncCheckBad checks if a block was marked as bad, and if it was, returns
// the reason.
SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) //perm:read
// SyncValidateTipset indicates whether the provided tipset is valid or not
SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) //perm:read
// MethodGroup: Mpool
// The Mpool methods are for interacting with the message pool. The message pool
// manages all incoming and outgoing 'messages' going over the network.
// MpoolPending returns pending mempool messages.
MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) //perm:read
// MpoolSelect returns a list of pending messages for inclusion in the next block
MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) //perm:read
// MpoolPush pushes a signed message to mempool.
MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write
// MpoolPushUntrusted pushes a signed message to mempool from untrusted sources.
MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write
// MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
// to mempool.
// maxFee is only used when GasFeeCap/GasPremium fields aren't specified
//
// When maxFee is set to 0, MpoolPushMessage will guess appropriate fee
// based on current chain conditions
MpoolPushMessage(ctx context.Context, msg *types.Message, spec *MessageSendSpec) (*types.SignedMessage, error) //perm:sign
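
// Illustrative usage (a sketch; `from` and `dest` are assumed addresses, and the
// 1 FIL MaxFee is only an example cap):
//
//	msg := &types.Message{
//		From:  from, // nonce and gas fields deliberately left unset
//		To:    dest,
//		Value: types.FromFil(1),
//	}
//	smsg, err := api.MpoolPushMessage(ctx, msg, &MessageSendSpec{
//		MaxFee: types.FromFil(1), // with MaxFee 0 the node guesses an appropriate fee
//	})
//	if err != nil {
//		return err
//	}
//	fmt.Println("pushed:", smsg.Cid())
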
// MpoolBatchPush batch pushes signed messages to the mempool.
MpoolBatchPush(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write
// MpoolBatchPushUntrusted batch pushes signed messages to the mempool from untrusted sources.
MpoolBatchPushUntrusted(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write
// MpoolBatchPushMessage batch pushes unsigned messages to the mempool.
MpoolBatchPushMessage(context.Context, []*types.Message, *MessageSendSpec) ([]*types.SignedMessage, error) //perm:sign
// MpoolCheckMessages performs logical checks on a batch of messages
MpoolCheckMessages(context.Context, []*MessagePrototype) ([][]MessageCheckStatus, error) //perm:read
// MpoolCheckPendingMessages performs logical checks for all pending messages from a given address
MpoolCheckPendingMessages(context.Context, address.Address) ([][]MessageCheckStatus, error) //perm:read
// MpoolCheckReplaceMessages performs logical checks on pending messages with replacement
MpoolCheckReplaceMessages(context.Context, []*types.Message) ([][]MessageCheckStatus, error) //perm:read
// MpoolGetNonce gets next nonce for the specified sender.
// Note that this method may not be atomic. Use MpoolPushMessage instead.
MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read
MpoolSub(context.Context) (<-chan MpoolUpdate, error) //perm:read
// MpoolClear clears pending messages from the mpool.
// If clearLocal is true, ALL messages will be cleared.
// If clearLocal is false, local messages will be protected, all others will be cleared.
MpoolClear(ctx context.Context, clearLocal bool) error //perm:write
// MpoolGetConfig returns (a copy of) the current mpool config
MpoolGetConfig(context.Context) (*types.MpoolConfig, error) //perm:read
// MpoolSetConfig sets the mpool config to (a copy of) the supplied config
MpoolSetConfig(context.Context, *types.MpoolConfig) error //perm:admin
// MethodGroup: Miner
MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*MiningBaseInfo, error) //perm:read
MinerCreateBlock(context.Context, *BlockTemplate) (*types.BlockMsg, error) //perm:write
// // UX ?
// MethodGroup: Wallet
// WalletNew creates a new address in the wallet with the given sigType.
// Available key types: bls, secp256k1, secp256k1-ledger
// Support for numerical types: 1 - secp256k1, 2 - BLS is deprecated
WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:write
// WalletHas indicates whether the given address is in the wallet.
WalletHas(context.Context, address.Address) (bool, error) //perm:write
// WalletList lists all the addresses in the wallet.
WalletList(context.Context) ([]address.Address, error) //perm:write
// WalletBalance returns the balance of the given address at the current head of the chain.
WalletBalance(context.Context, address.Address) (types.BigInt, error) //perm:read
// WalletSign signs the given bytes using the given address.
WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) //perm:sign
// WalletSignMessage signs the given message using the given address.
WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) //perm:sign
// WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid.
// The address does not have to be in the wallet.
WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
// WalletDefaultAddress returns the address marked as default in the wallet.
WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
// WalletSetDefault marks the given address as the default one.
WalletSetDefault(context.Context, address.Address) error //perm:write
// WalletExport returns the private key of an address in the wallet.
WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
// WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet.
WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin
// WalletDelete deletes an address from the wallet.
WalletDelete(context.Context, address.Address) error //perm:admin
// WalletValidateAddress validates whether a given string can be decoded as a well-formed address
WalletValidateAddress(context.Context, string) (address.Address, error) //perm:read
// Other
// MethodGroup: Client
// The Client methods all have to do with interacting with the storage and
// retrieval markets as a client
// ClientImport imports a file under the specified path into the filestore.
ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error) //perm:admin
// ClientRemoveImport removes file import
ClientRemoveImport(ctx context.Context, importID imports.ID) error //perm:admin
// ClientStartDeal proposes a deal with a miner.
ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:admin
// ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
ClientStatelessDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:write
// ClientGetDealInfo returns the latest information about a given deal.
ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error) //perm:read
// ClientListDeals returns information about the deals made by the local client.
ClientListDeals(ctx context.Context) ([]DealInfo, error) //perm:write
// ClientGetDealUpdates returns the status of updated deals
ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error) //perm:write
// ClientGetDealStatus returns the deal status string for a given status code
ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read
// ClientHasLocal indicates whether a certain CID is locally stored.
ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write
// ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]QueryOffer, error) //perm:read
// ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) //perm:read
// ClientRetrieve initiates the retrieval of a file, as specified in the order.
ClientRetrieve(ctx context.Context, params RetrievalOrder) (*RestrievalRes, error) //perm:admin
// ClientRetrieveWait waits for retrieval to be complete
ClientRetrieveWait(ctx context.Context, deal retrievalmarket.DealID) error //perm:admin
// ClientExport exports a file stored in the local filestore to a system file
ClientExport(ctx context.Context, exportRef ExportRef, fileRef FileRef) error //perm:admin
// ClientListRetrievals returns information about retrievals made by the local client
ClientListRetrievals(ctx context.Context) ([]RetrievalInfo, error) //perm:write
// ClientGetRetrievalUpdates returns status of updated retrieval deals
ClientGetRetrievalUpdates(ctx context.Context) (<-chan RetrievalInfo, error) //perm:write
// ClientQueryAsk returns a signed StorageAsk from the specified miner.
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*StorageAsk, error) //perm:read
// ClientDealPieceCID calculates the CommP and data size of the specified CID
ClientDealPieceCID(ctx context.Context, root cid.Cid) (DataCIDSize, error) //perm:read
// ClientCalcCommP calculates the CommP for a specified file
ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error) //perm:write
// ClientGenCar generates a CAR file for the specified file.
ClientGenCar(ctx context.Context, ref FileRef, outpath string) error //perm:write
// ClientDealSize calculates real deal data size
ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error) //perm:read
// ClientListDataTransfers returns the status of all ongoing transfers of data
ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write
ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write
// ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
// which are stuck due to insufficient funds
ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write
// ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID
ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write
// ClientUnimport removes references to the specified file from filestore
// ClientUnimport(path string)
// ClientListImports lists imported files and their root CIDs
ClientListImports(ctx context.Context) ([]Import, error) //perm:write
// ClientListAsks() []Ask
// MethodGroup: State
// The State methods are used to query, inspect, and interact with chain state.
// Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
// A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used.
// StateCall runs the given message and returns its result without any persisted changes.
//
// StateCall applies the message to the tipset's parent state. The
// message is not applied on-top-of the messages in the passed-in
// tipset.
StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error) //perm:read
// StateReplay replays a given message, assuming it was included in a block in the specified tipset.
//
// If a tipset key is provided, and a replacing message is not found on chain,
// the method will return an error saying that the message wasn't found
//
// If no tipset key is provided, the appropriate tipset is looked up, and if
// the message was gas-repriced, the on-chain message will be replayed - in
// that case the returned InvocResult.MsgCid will not match the Cid param
//
// If the caller wants to ensure that exactly the requested message was executed,
// they MUST check that InvocResult.MsgCid is equal to the provided Cid.
// Without this check both the requested and original message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID and possibly different
// gas values and/or signature, but with all other parameters matching (source/destination,
// nonce, params, etc.)
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error) //perm:read
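
// Illustrative usage (a sketch; `msgCid` is assumed to be the CID of a message
// of interest). Checking InvocResult.MsgCid guards against replacements:
//
//	res, err := api.StateReplay(ctx, types.EmptyTSK, msgCid)
//	if err != nil {
//		return err
//	}
//	if !res.MsgCid.Equals(msgCid) {
//		// a gas-repriced replacement was executed instead of the original message
//	}
//	fmt.Println("exit code:", res.MsgRct.ExitCode)
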
// StateGetActor returns the indicated actor's nonce and balance.
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) //perm:read
// StateReadState returns the indicated actor's state.
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) //perm:read
// StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height.
StateListMessages(ctx context.Context, match *MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) //perm:read
// StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number.
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) //perm:read
// StateEncodeParams attempts to encode the provided json params to binary form
StateEncodeParams(ctx context.Context, toActCode cid.Cid, method abi.MethodNum, params json.RawMessage) ([]byte, error) //perm:read
// StateNetworkName returns the name of the network the node is synced to
StateNetworkName(context.Context) (dtypes.NetworkName, error) //perm:read
// StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included.
StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
// StateMinerActiveSectors returns info about sectors that a given miner is actively proving.
StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
// StateMinerProvingDeadline calculates the deadline at some epoch for a proving period
// and returns the deadline-related calculations.
StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) //perm:read
// StateMinerPower returns the power of the indicated miner
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error) //perm:read
// StateMinerInfo returns info about the indicated miner
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (MinerInfo, error) //perm:read
// StateMinerDeadlines returns all the proving deadlines for the given miner
StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]Deadline, error) //perm:read
// StateMinerPartitions returns all partitions in the specified deadline
StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]Partition, error) //perm:read
// StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner
StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
// StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error) //perm:read
// StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner
StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
// StateMinerPreCommitDepositForPower returns the precommit deposit for the specified miner's sector
StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
// StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
// StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent
StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
// StateMinerSectorAllocated checks if a sector number is marked as allocated.
StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) //perm:read
// StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector.
// Returns nil and no error if the sector isn't precommitted.
//
// Note that the sector number may be allocated while PreCommitInfo is nil. This means that either allocated sector
// numbers were compacted, and the sector number was marked as allocated in order to reduce size of the allocated
// sectors bitfield, or that the sector was precommitted, but the precommit has expired.
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error) //perm:read
// StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns nil if the sector info isn't found
// NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate
// expiration epoch
StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) //perm:read
// StateSectorExpiration returns the epoch at which the given sector will expire
StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) //perm:read
// StateSectorPartition finds deadline/partition with the specified sector
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) //perm:read
// StateSearchMsg looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed
//
// NOTE: If a replacing message is found on chain, this method will return
// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
// CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
// result of the execution of the replacing message.
//
// If the caller wants to ensure that exactly the requested message was executed,
// they must check that MsgLookup.Message is equal to the provided 'cid', or set the
// `allowReplaced` parameter to false. Without this check, and with `allowReplaced`
// set to true, both the requested and original message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID and possibly different
// gas values and/or signature, but with all other parameters matching (source/destination,
// nonce, params, etc.)
StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) //perm:read
// StateWaitMsg looks back up to limit epochs in the chain for a message.
// If not found, it blocks until the message arrives on chain, and gets to the
// indicated confidence depth.
//
// NOTE: If a replacing message is found on chain, this method will return
// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
// CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
// result of the execution of the replacing message.
//
// If the caller wants to ensure that exactly the requested message was executed,
// they must check that MsgLookup.Message is equal to the provided 'cid', or set the
// `allowReplaced` parameter to false. Without this check, and with `allowReplaced`
// set to true, both the requested and original message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID and possibly different
// gas values and/or signature, but with all other parameters matching (source/destination,
// nonce, params, etc.)
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) //perm:read
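
// Illustrative usage (a sketch; waits with confidence 5, no lookback limit, and
// accepts replacing messages, then checks for a replacement explicitly):
//
//	lookup, err := api.StateWaitMsg(ctx, msgCid, 5, LookbackNoLimit, true)
//	if err != nil {
//		return err
//	}
//	if !lookup.Message.Equals(msgCid) {
//		// a replacing message was executed; decide whether that is acceptable
//	}
//	fmt.Println("included at epoch:", lookup.Height, "exit:", lookup.Receipt.ExitCode)
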
// StateListMiners returns the addresses of every miner that has claimed power in the Power Actor
StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
// StateListActors returns the addresses of every actor in the state
StateListActors(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
// StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market
StateMarketBalance(context.Context, address.Address, types.TipSetKey) (MarketBalance, error) //perm:read
// StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market
StateMarketParticipants(context.Context, types.TipSetKey) (map[string]MarketBalance, error) //perm:read
// StateMarketDeals returns information about every deal in the Storage Market
StateMarketDeals(context.Context, types.TipSetKey) (map[string]*MarketDeal, error) //perm:read
// StateMarketStorageDeal returns information about the indicated deal
StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error) //perm:read
// StateGetAllocationForPendingDeal returns the allocation for a given deal ID of a pending deal. Returns nil if
// the pending allocation is not found.
StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read
// StateGetAllocation returns the allocation for a given address and allocation ID.
StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read
// StateGetAllocations returns all the allocations for a given client.
StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read
// StateGetClaim returns the claim for a given address and claim ID.
StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) //perm:read
// StateGetClaims returns all the claims for a given provider.
StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read
// StateComputeDataCID computes DataCID from a set of on-chain deals
StateComputeDataCID(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) //perm:read
// StateLookupID retrieves the ID address of the given address
StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
// StateAccountKey returns the public key address of the given ID address for secp and bls accounts
StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
// StateLookupRobustAddress returns the public key address of the given ID address for non-account addresses (multisig, miners etc)
StateLookupRobustAddress(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
// StateChangedActors returns all the actors whose states change between the two given state CIDs
// TODO: Should this take tipset keys instead?
StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) //perm:read
// StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error) //perm:read
// StateMinerAllocated returns a bitfield containing all sector numbers marked as allocated in miner state
StateMinerAllocated(context.Context, address.Address, types.TipSetKey) (*bitfield.BitField, error) //perm:read
// StateCompute is a flexible command that applies the given messages on the given tipset.
// The messages are run as though the VM were at the provided height.
//
// When called, StateCompute will:
// - Load the provided tipset, or use the current chain head if not provided
// - Compute the tipset state of the provided tipset on top of the parent state
// - (note that this step runs before vmheight is applied to the execution)
// - Execute state upgrade if any were scheduled at the epoch, or in null
// blocks preceding the tipset
// - Call the cron actor on null blocks preceding the tipset
// - For each block in the tipset
// - Apply messages in blocks in the specified order
// - Award block reward by calling the reward actor
// - Call the cron actor for the current epoch
// - If the specified vmheight is higher than the current epoch, apply any
// needed state upgrades to the state
// - Apply the specified messages to the state
//
// The vmheight parameter sets VM execution epoch, and can be used to simulate
// message execution in different network versions. If the specified vmheight
// epoch is higher than the epoch of the specified tipset, any state upgrades
// until the vmheight will be executed on the state before applying messages
// specified by the user.
//
// Note that the initial tipset state computation is not affected by the
// vmheight parameter - only the messages in the `apply` set are
//
// If the caller wants to simply compute the state, vmheight should be set to
// the epoch of the specified tipset.
//
// Messages in the `apply` parameter must have the correct nonces, and gas
// values set.
StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*ComputeStateOutput, error) //perm:read
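
// Illustrative usage (a sketch): to simply recompute the state of the current
// head tipset, pass its own epoch as vmheight and no extra messages:
//
//	ts, err := api.ChainHead(ctx)
//	if err != nil {
//		return err
//	}
//	out, err := api.StateCompute(ctx, ts.Height(), nil, ts.Key())
//	if err != nil {
//		return err
//	}
//	fmt.Println("computed state root:", out.Root)
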
// StateVerifierStatus returns the data cap for the given address.
// Returns nil if there is no entry in the data cap table for the
// address.
StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
// StateVerifiedClientStatus returns the data cap for the given address.
// Returns nil if there is no entry in the data cap table for the
// address.
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
// StateVerifiedRegistryRootKey returns the address of the Verified Registry's root key
StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) //perm:read
// StateDealProviderCollateralBounds returns the min and max collateral a storage provider
// can issue. It takes the deal size and verified status as parameters.
StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error) //perm:read
// StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset.
// This is not used anywhere in the protocol itself, and is only for external consumption.
StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) //perm:read
// StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset.
// This is the value reported by the runtime interface to actors code.
StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (CirculatingSupply, error) //perm:read
// StateNetworkVersion returns the network version at the given tipset
StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error) //perm:read
// StateActorCodeCIDs returns the CIDs of all the builtin actors for the given network version
StateActorCodeCIDs(context.Context, abinetwork.Version) (map[string]cid.Cid, error) //perm:read
// StateActorManifestCID returns the CID of the builtin actors manifest for the given network version
StateActorManifestCID(context.Context, abinetwork.Version) (cid.Cid, error) //perm:read
// StateGetRandomnessFromTickets is used to sample the chain for randomness.
StateGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
// StateGetRandomnessFromBeacon is used to sample the beacon for randomness.
StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
// StateGetRandomnessDigestFromTickets is used to sample the chain for randomness.
StateGetRandomnessDigestFromTickets(ctx context.Context, randEpoch abi.ChainEpoch, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
// StateGetRandomnessDigestFromBeacon is used to sample the beacon for randomness.
StateGetRandomnessDigestFromBeacon(ctx context.Context, randEpoch abi.ChainEpoch, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
// StateGetBeaconEntry returns the beacon entry for the given filecoin epoch. If
// the entry has not yet been produced, the call will block until the entry
// becomes available
StateGetBeaconEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) //perm:read
// StateGetNetworkParams returns the current network params
StateGetNetworkParams(ctx context.Context) (*NetworkParams, error) //perm:read
// MethodGroup: Msig
// The Msig methods are used to interact with multisig wallets on the
// filecoin network
// MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent
MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
// MsigGetVestingSchedule returns the vesting details of a given multisig.
MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (MsigVesting, error) //perm:read
// MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
// It takes the following params: <multisig address>, <start epoch>, <end epoch>
MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) //perm:read
// MsigGetPending returns pending transactions for the given multisig
// wallet. Once pending transactions are fully approved, they will no longer
// appear here.
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error) //perm:read
// MsigCreate creates a multisig wallet
// It takes the following params: <required number of senders>, <approving addresses>, <unlock duration>
// <initial balance>, <sender address of the create msg>, <gas price>
MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (*MessagePrototype, error) //perm:sign
// MsigPropose proposes a multisig message
// It takes the following params: <multisig address>, <recipient address>, <value to transfer>,
// <sender address of the propose msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
// MsigApprove approves a previously-proposed multisig message by transaction ID
// It takes the following params: <multisig address>, <proposed transaction ID>, <signer address>
MsigApprove(context.Context, address.Address, uint64, address.Address) (*MessagePrototype, error) //perm:sign
// MsigApproveTxnHash approves a previously-proposed multisig message, specified
// using both transaction ID and a hash of the parameters used in the
// proposal. This method of approval can be used to ensure you only approve
// exactly the transaction you think you are.
// It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>,
// <sender address of the approve msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
// MsigCancel cancels a previously-proposed multisig message
// It takes the following params: <multisig address>, <proposed transaction ID>, <signer address>
MsigCancel(context.Context, address.Address, uint64, address.Address) (*MessagePrototype, error) //perm:sign
// MsigCancelTxnHash cancels a previously-proposed multisig message, specified
// using both transaction ID and a hash of the parameters used in the proposal.
// It takes the following params: <multisig address>, <proposed transaction ID>, <recipient address>, <value to transfer>,
// <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigCancelTxnHash(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
// MsigAddPropose proposes adding a signer in the multisig
// It takes the following params: <multisig address>, <sender address of the propose msg>,
// <new signer>, <whether the number of required signers should be increased>
MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (*MessagePrototype, error) //perm:sign
// MsigAddApprove approves a previously proposed AddSigner message
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
// <proposer address>, <new signer>, <whether the number of required signers should be increased>
MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (*MessagePrototype, error) //perm:sign
// MsigAddCancel cancels a previously proposed AddSigner message
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
// <new signer>, <whether the number of required signers should be increased>
MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (*MessagePrototype, error) //perm:sign
// MsigSwapPropose proposes swapping 2 signers in the multisig
// It takes the following params: <multisig address>, <sender address of the propose msg>,
// <old signer>, <new signer>
MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (*MessagePrototype, error) //perm:sign
// MsigSwapApprove approves a previously proposed SwapSigner
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
// <proposer address>, <old signer>, <new signer>
MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (*MessagePrototype, error) //perm:sign
// MsigSwapCancel cancels a previously proposed SwapSigner message
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
// <old signer>, <new signer>
MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (*MessagePrototype, error) //perm:sign
// MsigRemoveSigner proposes the removal of a signer from the multisig.
// It accepts the multisig to make the change on, the proposer address to
// send the message from, the address to be removed, and a boolean
// indicating whether or not the signing threshold should be lowered by one
// along with the address removal.
MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (*MessagePrototype, error) //perm:sign
// MarketAddBalance adds funds to the market actor
MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
// MarketGetReserved gets the amount of funds that are currently reserved for the address
MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) //perm:sign
// MarketReserveFunds reserves funds for a deal
MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
// MarketReleaseFunds releases funds reserved by MarketReserveFunds
MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error //perm:sign
// MarketWithdraw withdraws unlocked funds from the market actor
MarketWithdraw(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
// MethodGroup: Paych
// The Paych methods are for interacting with and managing payment channels
// PaychGet gets or creates a payment channel between address pair
// The specified amount will be reserved for use. If there aren't enough non-reserved funds
// available, funds will be added through an on-chain message.
// - When opts.OffChain is true, this call will not cause any messages to be sent to the chain (no automatic
// channel creation/funds adding). If the operation can't be performed without sending a message, an error will be
// returned. Note that even when this option is specified, this call can be blocked by previous operations on the
// channel waiting for on-chain operations.
PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt, opts PaychGetOpts) (*ChannelInfo, error) //perm:sign
// PaychFund gets or creates a payment channel between address pair.
// The specified amount will be added to the channel through an on-chain send for future use.
PaychFund(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error) //perm:sign
// PaychGetWaitReady waits for the message with the given CID (returned by PaychGet/PaychFund) to land on chain and returns the channel address
PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error) //perm:sign
// PaychAvailableFunds returns the funds information for the given channel
PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error) //perm:sign
// PaychAvailableFundsByFromTo is like PaychAvailableFunds, but looks the channel up by its from/to address pair
PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error) //perm:sign
// PaychList lists the payment channels tracked by this node
PaychList(context.Context) ([]address.Address, error) //perm:read
// PaychStatus returns the control address and direction of the given channel
PaychStatus(context.Context, address.Address) (*PaychStatus, error) //perm:read
// PaychSettle starts the on-chain settlement period for the given channel
PaychSettle(context.Context, address.Address) (cid.Cid, error) //perm:sign
// PaychCollect collects the funds of a settled channel
PaychCollect(context.Context, address.Address) (cid.Cid, error) //perm:sign
// PaychAllocateLane allocates a new lane on the given channel
PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) //perm:sign
// PaychNewPayment creates a set of signed vouchers for a channel between the given addresses according to the supplied specs
PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []VoucherSpec) (*PaymentInfo, error) //perm:sign
// PaychVoucherCheckValid checks that the given voucher is valid for the given channel
PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error //perm:read
// PaychVoucherCheckSpendable checks whether the given voucher is currently spendable on chain
PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) //perm:read
// PaychVoucherCreate creates and signs a voucher for the given amount on the given lane
PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*VoucherCreateResult, error) //perm:sign
// PaychVoucherAdd stores a voucher received for the given channel
PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) //perm:write
// PaychVoucherList lists the vouchers stored for the given channel
PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) //perm:write
// PaychVoucherSubmit submits the given voucher on chain
PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) //perm:sign
// MethodGroup: Node
// These methods are general node management and status commands
NodeStatus(ctx context.Context, inclChainStatus bool) (NodeStatus, error) //perm:read
// MethodGroup: Eth
// These methods are used for Ethereum-compatible JSON-RPC calls
//
// EthAccounts will always return [] since we don't expect Lotus to manage private keys
EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error) //perm:read
// EthAddressToFilecoinAddress converts an EthAddress into an f410 Filecoin Address
EthAddressToFilecoinAddress(ctx context.Context, ethAddress ethtypes.EthAddress) (address.Address, error) //perm:read
// FilecoinAddressToEthAddress converts an f410 or f0 Filecoin Address to an EthAddress
FilecoinAddressToEthAddress(ctx context.Context, filecoinAddress address.Address) (ethtypes.EthAddress, error) //perm:read
// EthBlockNumber returns the height of the latest (heaviest) TipSet
EthBlockNumber(ctx context.Context) (ethtypes.EthUint64, error) //perm:read
// EthGetBlockTransactionCountByNumber returns the number of messages in the TipSet
EthGetBlockTransactionCountByNumber(ctx context.Context, blkNum ethtypes.EthUint64) (ethtypes.EthUint64, error) //perm:read
// EthGetBlockTransactionCountByHash returns the number of messages in the TipSet
EthGetBlockTransactionCountByHash(ctx context.Context, blkHash ethtypes.EthHash) (ethtypes.EthUint64, error) //perm:read
EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthHash, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read
EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read
EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error) //perm:read
EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error) //perm:read
EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error) //perm:read
EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error) //perm:read
EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) //perm:read
EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error) //perm:read
EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error) //perm:read
EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read
EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read
EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) //perm:read
EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) //perm:read
EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) //perm:read
EthChainId(ctx context.Context) (ethtypes.EthUint64, error) //perm:read
EthSyncing(ctx context.Context) (ethtypes.EthSyncingResult, error) //perm:read
NetVersion(ctx context.Context) (string, error) //perm:read
NetListening(ctx context.Context) (bool, error) //perm:read
EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, error) //perm:read
EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read
EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) //perm:read
EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read
EthEstimateGas(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthUint64, error) //perm:read
EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) //perm:read
EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) //perm:read
// Returns event logs matching given filter spec.
EthGetLogs(ctx context.Context, filter *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) //perm:read
// Polling method for a filter, returns event logs which occurred since last poll.
// (requires write perm since timestamp of last filter execution will be written)
EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:read
// Returns event logs matching filter with given id.
// (requires write perm since timestamp of last filter execution will be written)
EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:read
// Installs a persistent filter based on given filter spec.
EthNewFilter(ctx context.Context, filter *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) //perm:read
// Installs a persistent filter to notify when a new block arrives.
EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:read
// Installs a persistent filter to notify when new messages arrive in the message pool.
EthNewPendingTransactionFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:read
// Uninstalls a filter with given id.
EthUninstallFilter(ctx context.Context, id ethtypes.EthFilterID) (bool, error) //perm:read
// Subscribe to different event types using websockets
// eventTypes is one or more of:
// - newHeads: notify when new blocks arrive.
// - pendingTransactions: notify when new messages arrive in the message pool.
// - logs: notify new event logs that match the given criteria
// params contains additional parameters used with the log event type
// The client will receive a stream of EthSubscriptionResponse values until EthUnsubscribe is called.
EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) //perm:read
// Unsubscribe from a websocket subscription
EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error) //perm:read
// Returns the client version
Web3ClientVersion(ctx context.Context) (string, error) //perm:read
// TraceAPI related methods
// Returns an OpenEthereum-compatible trace of the given block (implementing `trace_block`),
// translating Filecoin semantics into Ethereum semantics and tracing both EVM and FVM calls.
//
// Features:
//
// - FVM actor create events, calls, etc. show up as if they were EVM smart contract events.
// - Native FVM call inputs are ABI-encoded (Solidity ABI) as if they were calls to a
// `handle_filecoin_method(uint64 method, uint64 codec, bytes params)` function
// (where `codec` is the IPLD codec of `params`).
// - Native FVM call outputs (return values) are ABI-encoded as `(uint32 exit_code, uint64
// codec, bytes output)` where `codec` is the IPLD codec of `output`.
//
// Limitations (for now):
//
// 1. Block rewards are not included in the trace.
// 2. SELFDESTRUCT operations are not included in the trace.
// 3. EVM smart contract "create" events always specify `0xfe` as the "code" for newly created EVM smart contracts.
EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) //perm:read
// Replays all transactions in a block returning the requested traces for each transaction
EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) //perm:read
// CreateBackup creates a node backup under the specified file name. The
// method requires that the lotus daemon is running with the
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
// the path specified when calling CreateBackup is within the base path
CreateBackup(ctx context.Context, fpath string) error //perm:admin
}
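// The sketch below is illustrative only and not part of the API surface. It
// shows one way a client might drive the Msig methods above: check the
// multisig's available balance, then propose a simple value transfer.
// `node`, `msig`, `dest`, `proposer` and `amt` are placeholders supplied by
// the caller, and the returned MessagePrototype still has to be signed and
// pushed (e.g. through the node's mpool methods) before anything lands on chain.
func exampleMsigProposeTransfer(ctx context.Context, node FullNode, msig, dest, proposer address.Address, amt types.BigInt) (*MessagePrototype, error) {
	// Check that the multisig can cover the transfer before proposing it.
	avail, err := node.MsigGetAvailableBalance(ctx, msig, types.EmptyTSK)
	if err != nil {
		return nil, err
	}
	if types.BigCmp(avail, amt) < 0 {
		return nil, fmt.Errorf("multisig %s has %s available, need %s", msig, avail, amt)
	}

	// Propose a plain value transfer: method 0 (Send) with no params.
	prop, err := node.MsigPropose(ctx, msig, dest, amt, proposer, 0, nil)
	if err != nil {
		return nil, err
	}

	// Once the proposal executes, it shows up in MsigGetPending until the
	// remaining signers approve it (e.g. via MsigApprove).
	return prop, nil
}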
// EthSubscriber is the reverse interface to the client, called after EthSubscribe
type EthSubscriber interface {
// note: the parameter is an ethtypes.EthSubscriptionResponse serialized as a JSON object
EthSubscription(ctx context.Context, r jsonrpc.RawParams) error // rpc_method:eth_subscription notify:true
}
type StorageAsk struct {
Response *storagemarket.StorageAsk
DealProtocols []string
}
type FileRef struct {
Path string
IsCAR bool
}
type MinerSectors struct {
// Live sectors that should be proven.
Live uint64
// Sectors actively contributing to power.
Active uint64
// Sectors with failed proofs.
Faulty uint64
}
type ImportRes struct {
Root cid.Cid
ImportID imports.ID
}
type Import struct {
Key imports.ID
Err string
Root *cid.Cid
// Source is the provenance of the import, e.g. "import", "unknown", etc.
// Currently unused, but may be used in the future.
Source string
// FilePath is the path of the original file. It is important that the file
// is retained at this path, because it will be referenced during
// the transfer (when we do the UnixFS chunking, we don't duplicate the
// leaves, but rather point to chunks of the original data through
// positional references).
FilePath string
// CARPath is the path of the CAR file containing the DAG for this import.
CARPath string
}
type DealInfo struct {
ProposalCid cid.Cid
State storagemarket.StorageDealStatus
Message string // more information about deal state, particularly errors
DealStages *storagemarket.DealStages
Provider address.Address
DataRef *storagemarket.DataRef
PieceCID cid.Cid
Size uint64
PricePerEpoch types.BigInt
Duration uint64
DealID abi.DealID
CreationTime time.Time
Verified bool
TransferChannelID *datatransfer.ChannelID
DataTransfer *DataTransferChannel
}
type MsgLookup struct {
Message cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed
Receipt types.MessageReceipt
ReturnDec interface{}
TipSet types.TipSetKey
Height abi.ChainEpoch
}
type MsgGasCost struct {
Message cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed
GasUsed abi.TokenAmount
BaseFeeBurn abi.TokenAmount
OverEstimationBurn abi.TokenAmount
MinerPenalty abi.TokenAmount
MinerTip abi.TokenAmount
Refund abi.TokenAmount
TotalCost abi.TokenAmount
}
// BlsMessages[x].cid = Cids[x]
// SecpkMessages[y].cid = Cids[BlsMessages.length + y]
type BlockMessages struct {
BlsMessages []*types.Message
SecpkMessages []*types.SignedMessage
Cids []cid.Cid
}
type Message struct {
Cid cid.Cid
Message *types.Message
}
type ActorState struct {
Balance types.BigInt
Code cid.Cid
State interface{}
}
type PCHDir int
const (
PCHUndef PCHDir = iota
PCHInbound
PCHOutbound
)
type PaychGetOpts struct {
OffChain bool
}
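// Illustrative sketch, not part of the API surface: using PaychGetOpts to
// attempt an off-chain reservation first and only fall back to an on-chain
// top-up when that fails. `node`, `from`, `to` and `amt` are placeholders
// supplied by the caller.
func examplePaychReserve(ctx context.Context, node FullNode, from, to address.Address, amt types.BigInt) (*ChannelInfo, error) {
	// With OffChain set, PaychGet never sends messages; it only succeeds if an
	// existing channel already holds enough non-reserved funds.
	ch, err := node.PaychGet(ctx, from, to, amt, PaychGetOpts{OffChain: true})
	if err == nil {
		return ch, nil
	}

	// Fall back to the default behaviour, which may create the channel or add
	// funds through an on-chain message.
	return node.PaychGet(ctx, from, to, amt, PaychGetOpts{OffChain: false})
}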
type PaychStatus struct {
ControlAddr address.Address
Direction PCHDir
}
type ChannelInfo struct {
Channel address.Address
WaitSentinel cid.Cid
}
type ChannelAvailableFunds struct {
// Channel is the address of the channel
Channel *address.Address
// From is the from address of the channel (channel creator)
From address.Address
// To is the to address of the channel
To address.Address
// ConfirmedAmt is the total amount of funds that have been confirmed on-chain for the channel
ConfirmedAmt types.BigInt
// PendingAmt is the amount of funds that are pending confirmation on-chain
PendingAmt types.BigInt
// NonReservedAmt is the part of ConfirmedAmt that is available for use (e.g. when the payment channel was pre-funded)
NonReservedAmt types.BigInt
// PendingAvailableAmt is the amount of funds that are pending confirmation on-chain that will become available once confirmed
PendingAvailableAmt types.BigInt
// PendingWaitSentinel can be used with PaychGetWaitReady to wait for
// confirmation of pending funds
PendingWaitSentinel *cid.Cid
// QueuedAmt is the amount that is queued up behind a pending request
QueuedAmt types.BigInt
// VoucherRedeemedAmt is the amount that is redeemed by vouchers on-chain
// and in the local datastore
VoucherReedeemedAmt types.BigInt
}
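// Illustrative sketch, not part of the API surface: waiting for pending
// channel funds to land on chain by combining PendingWaitSentinel (described
// above) with PaychGetWaitReady. `node` and `ch` are placeholders supplied by
// the caller.
func exampleWaitForPendingFunds(ctx context.Context, node FullNode, ch address.Address) (address.Address, error) {
	funds, err := node.PaychAvailableFunds(ctx, ch)
	if err != nil {
		return address.Undef, err
	}
	if funds.PendingWaitSentinel == nil {
		// Nothing is pending; the channel is already usable as-is.
		return ch, nil
	}
	// Block until the pending message executes and the channel is ready.
	return node.PaychGetWaitReady(ctx, *funds.PendingWaitSentinel)
}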
type PaymentInfo struct {
Channel address.Address
WaitSentinel cid.Cid
Vouchers []*paych.SignedVoucher
}
type VoucherSpec struct {
Amount types.BigInt
TimeLockMin abi.ChainEpoch
TimeLockMax abi.ChainEpoch
MinSettle abi.ChainEpoch
Extra *paych.ModVerifyParams
}
// VoucherCreateResult is the response to calling PaychVoucherCreate
type VoucherCreateResult struct {
// Voucher that was created, or nil if there was an error or if there
// were insufficient funds in the channel
Voucher *paych.SignedVoucher
// Shortfall is the additional amount that would be needed in the channel
// in order to be able to create the voucher
Shortfall types.BigInt
}
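// Illustrative sketch, not part of the API surface: creating a voucher and
// handling the shortfall case that VoucherCreateResult signals with a nil
// Voucher. `node`, `ch`, `amt` and `lane` are placeholders supplied by the caller.
func exampleCreateVoucher(ctx context.Context, node FullNode, ch address.Address, amt types.BigInt, lane uint64) (*paych.SignedVoucher, error) {
	res, err := node.PaychVoucherCreate(ctx, ch, amt, lane)
	if err != nil {
		return nil, err
	}
	if res.Voucher == nil {
		// The channel lacks funds; Shortfall reports how much more is needed.
		return nil, fmt.Errorf("channel %s is short %s for this voucher", ch, res.Shortfall)
	}
	return res.Voucher, nil
}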
type MinerPower struct {
MinerPower power.Claim
TotalPower power.Claim
HasMinPower bool
}
type QueryOffer struct {
Err string
Root cid.Cid
Piece *cid.Cid
Size uint64
MinPrice types.BigInt
UnsealPrice types.BigInt
PricePerByte abi.TokenAmount
PaymentInterval uint64
PaymentIntervalIncrease uint64
Miner address.Address
MinerPeer retrievalmarket.RetrievalPeer
}
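// Order converts this query offer into a RetrievalOrder for the given client address.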
func (o *QueryOffer) Order(client address.Address) RetrievalOrder {
return RetrievalOrder{
Root: o.Root,
Piece: o.Piece,
Size: o.Size,
Total: o.MinPrice,
UnsealPrice: o.UnsealPrice,
PaymentInterval: o.PaymentInterval,
PaymentIntervalIncrease: o.PaymentIntervalIncrease,
Client: client,
Miner: o.Miner,
MinerPeer: &o.MinerPeer,
}
}
type MarketBalance struct {
Escrow big.Int
Locked big.Int
}
type MarketDeal struct {
Proposal market.DealProposal
State market.DealState
}
type RetrievalOrder struct {
Root cid.Cid
Piece *cid.Cid
DataSelector *Selector
// todo: Size/Total are only used for calculating price per byte; we should let users just pass that
Size uint64
Total types.BigInt
UnsealPrice types.BigInt
PaymentInterval uint64
PaymentIntervalIncrease uint64
Client address.Address
Miner address.Address
MinerPeer *retrievalmarket.RetrievalPeer
RemoteStore *RemoteStoreID `json:"RemoteStore,omitempty"`
}
type RemoteStoreID = uuid.UUID
type InvocResult struct {
MsgCid cid.Cid
Msg *types.Message
MsgRct *types.MessageReceipt
GasCost MsgGasCost
ExecutionTrace types.ExecutionTrace
Error string
Duration time.Duration
}
type MethodCall struct {
types.MessageReceipt
Error string
}
type StartDealParams struct {
Data *storagemarket.DataRef
Wallet address.Address
Miner address.Address
EpochPrice types.BigInt
MinBlocksDuration uint64
ProviderCollateral big.Int
DealStartEpoch abi.ChainEpoch
FastRetrieval bool
VerifiedDeal bool
}
func (s *StartDealParams) UnmarshalJSON(raw []byte) (err error) {
type sdpAlias StartDealParams
sdp := sdpAlias{
FastRetrieval: true,
}
if err := json.Unmarshal(raw, &sdp); err != nil {
return err
}
*s = StartDealParams(sdp)
return nil
}
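// Illustrative sketch, not part of the API surface: because of the custom
// UnmarshalJSON above, FastRetrieval defaults to true when the field is
// omitted from the JSON payload rather than to Go's zero value of false.
func exampleStartDealParamsDefault() (bool, error) {
	var params StartDealParams
	if err := json.Unmarshal([]byte(`{"MinBlocksDuration": 518400}`), &params); err != nil {
		return false, err
	}
	// params.FastRetrieval is true here even though the JSON never set it.
	return params.FastRetrieval, nil
}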
type IpldObject struct {
Cid cid.Cid
Obj interface{}
}
type ActiveSync struct {
WorkerID uint64
Base *types.TipSet
Target *types.TipSet
Stage SyncStateStage
Height abi.ChainEpoch
Start time.Time
End time.Time
Message string
}
type SyncState struct {
ActiveSyncs []ActiveSync
VMApplied uint64
}
type SyncStateStage int
const (
StageIdle = SyncStateStage(iota)
StageHeaders
StagePersistHeaders
StageMessages
StageSyncComplete
StageSyncErrored
StageFetchingMessages
)
func (v SyncStateStage) String() string {
switch v {
case StageIdle:
return "idle"
case StageHeaders:
return "header sync"
case StagePersistHeaders:
return "persisting headers"
case StageMessages:
return "message sync"
case StageSyncComplete:
return "complete"
case StageSyncErrored:
return "error"
case StageFetchingMessages:
return "fetching messages"
default:
return fmt.Sprintf("<unknown: %d>", v)
}
}
type MpoolChange int
const (
MpoolAdd MpoolChange = iota
MpoolRemove
)
type MpoolUpdate struct {
Type MpoolChange
Message *types.SignedMessage
}
type ComputeStateOutput struct {
Root cid.Cid
Trace []*InvocResult
}
type DealCollateralBounds struct {
Min abi.TokenAmount
Max abi.TokenAmount
}
type CirculatingSupply struct {
FilVested abi.TokenAmount
FilMined abi.TokenAmount
FilBurnt abi.TokenAmount
FilLocked abi.TokenAmount
FilCirculating abi.TokenAmount
FilReserveDisbursed abi.TokenAmount
}
type MiningBaseInfo struct {
MinerPower types.BigInt
NetworkPower types.BigInt
Sectors []builtin.ExtendedSectorInfo
WorkerKey address.Address
SectorSize abi.SectorSize
PrevBeaconEntry types.BeaconEntry
BeaconEntries []types.BeaconEntry
EligibleForMining bool
}
type BlockTemplate struct {
Miner address.Address
Parents types.TipSetKey
Ticket *types.Ticket
Eproof *types.ElectionProof
BeaconValues []types.BeaconEntry
Messages []*types.SignedMessage
Epoch abi.ChainEpoch
Timestamp uint64
WinningPoStProof []builtin.PoStProof
}
type DataSize struct {
PayloadSize int64
PieceSize abi.PaddedPieceSize
}
type DataCIDSize struct {
PayloadSize int64
PieceSize abi.PaddedPieceSize
PieceCID cid.Cid
}
type CommPRet struct {
Root cid.Cid
Size abi.UnpaddedPieceSize
}
type HeadChange struct {
Type string
Val *types.TipSet
}
type MsigProposeResponse int
const (
MsigApprove MsigProposeResponse = iota
MsigCancel
)
type Deadline struct {
PostSubmissions bitfield.BitField
DisputableProofCount uint64
}
type Partition struct {
AllSectors bitfield.BitField
FaultySectors bitfield.BitField
RecoveringSectors bitfield.BitField
LiveSectors bitfield.BitField
ActiveSectors bitfield.BitField
}
type Fault struct {
Miner address.Address
Epoch abi.ChainEpoch
}
var EmptyVesting = MsigVesting{
InitialBalance: types.EmptyInt,
StartEpoch: -1,
UnlockDuration: -1,
}
type MsigVesting struct {
InitialBalance abi.TokenAmount
StartEpoch abi.ChainEpoch
UnlockDuration abi.ChainEpoch
}
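// Illustrative sketch, not part of the API surface: detecting the sentinel
// schedule above, assuming MsigGetVestingSchedule reports EmptyVesting for
// multisigs without locked funds. `node` and `msig` are placeholders supplied
// by the caller; only the -1 epoch sentinels are compared, since comparing
// against types.EmptyInt directly is not meaningful.
func exampleHasVestingSchedule(ctx context.Context, node FullNode, msig address.Address) (bool, error) {
	vesting, err := node.MsigGetVestingSchedule(ctx, msig, types.EmptyTSK)
	if err != nil {
		return false, err
	}
	return vesting.StartEpoch != EmptyVesting.StartEpoch || vesting.UnlockDuration != EmptyVesting.UnlockDuration, nil
}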
type MessageMatch struct {
To address.Address
From address.Address
}
type MsigTransaction struct {
ID int64
To address.Address
Value abi.TokenAmount
Method abi.MethodNum
Params []byte
Approved []address.Address
}
type PruneOpts struct {
MovingGC bool
RetainState int64
}
type HotGCOpts struct {
Threshold float64
Periodic bool
Moving bool
}
type EthTxReceipt struct {
TransactionHash ethtypes.EthHash `json:"transactionHash"`
TransactionIndex ethtypes.EthUint64 `json:"transactionIndex"`
BlockHash ethtypes.EthHash `json:"blockHash"`
BlockNumber ethtypes.EthUint64 `json:"blockNumber"`
From ethtypes.EthAddress `json:"from"`
To *ethtypes.EthAddress `json:"to"`
StateRoot ethtypes.EthHash `json:"root"`
Status ethtypes.EthUint64 `json:"status"`
ContractAddress *ethtypes.EthAddress `json:"contractAddress"`
CumulativeGasUsed ethtypes.EthUint64 `json:"cumulativeGasUsed"`
GasUsed ethtypes.EthUint64 `json:"gasUsed"`
EffectiveGasPrice ethtypes.EthBigInt `json:"effectiveGasPrice"`
LogsBloom ethtypes.EthBytes `json:"logsBloom"`
Logs []ethtypes.EthLog `json:"logs"`
Type ethtypes.EthUint64 `json:"type"`
}
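// Illustrative sketch, not part of the API surface: checking whether an
// Ethereum-style transaction succeeded by combining EthGetTransactionReceipt
// with the Status field above (1 = success, 0 = failure). `node` and `txHash`
// are placeholders supplied by the caller.
func exampleTxSucceeded(ctx context.Context, node FullNode, txHash ethtypes.EthHash) (bool, error) {
	receipt, err := node.EthGetTransactionReceipt(ctx, txHash)
	if err != nil {
		return false, err
	}
	if receipt == nil {
		// The transaction has not been included in a tipset yet.
		return false, nil
	}
	return receipt.Status == ethtypes.EthUint64(1), nil
}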