lotus/api/api_storage.go

package api

import (
"bytes"
"context"
"time"

"github.com/google/uuid"

"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p/core/peer"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-fil-markets/piecestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin/v8/market"
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
abinetwork "github.com/filecoin-project/go-state-types/network"

"github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/storage/pipeline/sealiface"
"github.com/filecoin-project/lotus/storage/sealer/fsutil"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

// MODIFYING THE API INTERFACE
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
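//
// For example, a hypothetical new read-only method would be declared with a
// doc comment and a `//perm:` tag, following the convention used throughout
// this interface (illustrative only, not a real method):
//
//	// FooState reports the current foo state
//	FooState(ctx context.Context) (string, error) //perm:read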

// StorageMiner is a low-level interface to the Filecoin network storage miner node
type StorageMiner interface {
Common
Net

ActorAddress(context.Context) (address.Address, error) //perm:read

ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error) //perm:read
ActorAddressConfig(ctx context.Context) (AddressConfig, error) //perm:read

// ActorWithdrawBalance allows withdrawing balance from the miner actor to the owner address.
// Specify amount as "0" to withdraw the full balance. This method returns a message CID
// and does not wait for message execution.
ActorWithdrawBalance(ctx context.Context, amount abi.TokenAmount) (cid.Cid, error) //perm:admin
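//
// A minimal usage sketch (assumes `ctx` and a StorageMiner RPC client `miner`
// obtained elsewhere; neither is part of this file):
//
//	// Withdraw the full available balance by passing "0" as the amount.
//	msgCid, err := miner.ActorWithdrawBalance(ctx, abi.NewTokenAmount(0))
//	if err != nil {
//		return err
//	}
//	fmt.Println("withdraw message:", msgCid)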

MiningBase(context.Context) (*types.TipSet, error) //perm:read

ComputeWindowPoSt(ctx context.Context, dlIdx uint64, tsk types.TipSetKey) ([]miner.SubmitWindowedPoStParams, error) //perm:admin
ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (abi.PieceInfo, error) //perm:admin

// Temp api for testing
PledgeSector(context.Context) (abi.SectorID, error) //perm:write

// Get the status of a given sector by ID
SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error) //perm:read
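//
// Sketch (with the assumed `miner` client and `ctx` as above): fetch sector 1,
// including its on-chain info:
//
//	info, err := miner.SectorsStatus(ctx, abi.SectorNumber(1), true)
//	if err != nil {
//		return err
//	}
//	fmt.Println(info.State, info.LastErr)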

// Add piece to an open sector. If no sectors with enough space are open,
// either a new sector will be created, or this call will block until more
// sectors can be created.
SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d PieceDealInfo) (SectorOffset, error) //perm:admin
SectorsUnsealPiece(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error //perm:admin
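//
// Sketch of handing a deal piece to the sealing subsystem; `pieceSize`,
// `pieceData` and `dealInfo` are assumed to come from the deal-making flow:
//
//	so, err := miner.SectorAddPieceToAny(ctx, pieceSize, pieceData, dealInfo)
//	if err != nil {
//		return err
//	}
//	// so.Sector and so.Offset report where the piece was placed.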

// List all staged sectors
SectorsList(context.Context) ([]abi.SectorNumber, error) //perm:read

// Get summary info of sectors
SectorsSummary(ctx context.Context) (map[SectorState]int, error) //perm:read

// List sectors in particular states
SectorsListInStates(context.Context, []SectorState) ([]abi.SectorNumber, error) //perm:read

SectorsRefs(context.Context) (map[string][]SealedRef, error) //perm:read

// SectorStartSealing can be called on sectors in Empty or WaitDeals states
// to trigger sealing early
SectorStartSealing(context.Context, abi.SectorNumber) error //perm:write
// SectorSetSealDelay sets the time that a newly-created sector
// waits for more deals before it starts sealing
SectorSetSealDelay(context.Context, time.Duration) error //perm:write
// SectorGetSealDelay gets the time that a newly-created sector
// waits for more deals before it starts sealing
SectorGetSealDelay(context.Context) (time.Duration, error) //perm:read
// SectorSetExpectedSealDuration sets the expected time for a sector to seal
SectorSetExpectedSealDuration(context.Context, time.Duration) error //perm:write
// SectorGetExpectedSealDuration gets the expected time for a sector to seal
SectorGetExpectedSealDuration(context.Context) (time.Duration, error) //perm:read
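//
// Sketch: give newly-created sectors up to an hour to fill with deals before
// sealing starts (values illustrative):
//
//	if err := miner.SectorSetSealDelay(ctx, time.Hour); err != nil {
//		return err
//	}
//	delay, _ := miner.SectorGetSealDelay(ctx) // now reports 1h0m0s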
SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error //perm:admin

// SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can
// be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties.
SectorRemove(context.Context, abi.SectorNumber) error //perm:admin
SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error //perm:admin

// SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then
// automatically removes it from storage
SectorTerminate(context.Context, abi.SectorNumber) error //perm:admin

// SectorTerminateFlush immediately sends a terminate message with sectors batched for termination.
// Returns null if the message wasn't sent
SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) //perm:admin

// SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message
SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin

// SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit.
// Returns null if the message wasn't sent
SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) //perm:admin

// SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message
SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin

// SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit.
// Returns null if the message wasn't sent
SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) //perm:admin

// SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message
SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
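//
// Sketch: check what is queued for the next Commit aggregate and flush it
// early (error handling elided):
//
//	pending, _ := miner.SectorCommitPending(ctx)
//	if len(pending) > 0 {
//		res, _ := miner.SectorCommitFlush(ctx) // nil result means nothing was sent
//		_ = res
//	}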
SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error //perm:admin
// SectorAbortUpgrade can be called on sectors that are in the process of being upgraded to abort the upgrade
SectorAbortUpgrade(context.Context, abi.SectorNumber) error //perm:admin

// SectorNumAssignerMeta returns sector number assigner metadata - reserved/allocated
SectorNumAssignerMeta(ctx context.Context) (NumAssignerMeta, error) //perm:read
// SectorNumReservations returns a list of sector number reservations
SectorNumReservations(ctx context.Context) (map[string]bitfield.BitField, error) //perm:read
// SectorNumReserve creates a new sector number reservation. Will fail if any other reservation has colliding
// numbers or name. Set force to true to override safety checks.
// Valid characters for name: a-z, A-Z, 0-9, _, -
SectorNumReserve(ctx context.Context, name string, sectors bitfield.BitField, force bool) error //perm:admin
// SectorNumReserveCount creates a new sector number reservation for `count` sector numbers.
// By default, lotus will allocate the lowest available sector numbers to the reservation.
// For restrictions on `name` see SectorNumReserve
SectorNumReserveCount(ctx context.Context, name string, count uint64) (bitfield.BitField, error) //perm:admin
// SectorNumFree drops a sector reservation
SectorNumFree(ctx context.Context, name string) error //perm:admin
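//
// Reservation walkthrough (a sketch; the reservation name "maintenance" is
// illustrative):
//
//	// Reserve 100 sector numbers; lotus picks the lowest available ones.
//	bf, err := miner.SectorNumReserveCount(ctx, "maintenance", 100)
//	if err != nil {
//		return err
//	}
//	_ = bf // bitfield of the numbers that were reserved
//	// ...later, drop the reservation:
//	err = miner.SectorNumFree(ctx, "maintenance")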

// WorkerConnect tells the node to connect to workers RPC
WorkerConnect(context.Context, string) error //perm:admin retry:true
WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) //perm:admin
WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) //perm:admin
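//
// Sketch: list connected workers and whether they are enabled (field names as
// assumed from storiface.WorkerStats; verify against the storiface package):
//
//	stats, err := miner.WorkerStats(ctx)
//	if err != nil {
//		return err
//	}
//	for id, st := range stats {
//		fmt.Println(id, st.Info.Hostname, st.Enabled)
//	}
//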
// storiface.WorkerReturn
ReturnDataCid(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true
ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true
ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storiface.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true
ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storiface.SectorCids, err *storiface.CallError) error //perm:admin retry:true
ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storiface.Commit1Out, err *storiface.CallError) error //perm:admin retry:true
ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storiface.Proof, err *storiface.CallError) error //perm:admin retry:true
ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnReplicaUpdate(ctx context.Context, callID storiface.CallID, out storiface.ReplicaUpdateOut, err *storiface.CallError) error //perm:admin retry:true
ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, vanillaProofs storiface.ReplicaVanillaProofs, err *storiface.CallError) error //perm:admin retry:true
ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storiface.ReplicaUpdateProof, err *storiface.CallError) error //perm:admin retry:true
ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnFinalizeReplicaUpdate(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true
ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true

// SealingSchedDiag dumps internal sealing scheduler state
SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin
SealingAbort(ctx context.Context, call storiface.CallID) error //perm:admin

// SealingRemoveRequest removes a request from the sealing pipeline
SealingRemoveRequest(ctx context.Context, schedId uuid.UUID) error //perm:admin

// paths.SectorIndex
StorageAttach(context.Context, storiface.StorageInfo, fsutil.FsStat) error //perm:admin
StorageDetach(ctx context.Context, id storiface.ID, url string) error //perm:admin
StorageInfo(context.Context, storiface.ID) (storiface.StorageInfo, error) //perm:admin
StorageReportHealth(context.Context, storiface.ID, storiface.HealthReport) error //perm:admin
StorageDeclareSector(ctx context.Context, storageID storiface.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error //perm:admin
StorageDropSector(ctx context.Context, storageID storiface.ID, s abi.SectorID, ft storiface.SectorFileType) error //perm:admin
// StorageFindSector returns a list of paths where the specified sector files exist.
//
// If allowFetch is set, a list of paths to which the sector can be fetched will also be returned.
// - Paths which have sector files locally (don't require fetching) will be listed first.
// - Paths which have sector files locally will not be filtered based on AllowTypes/DenyTypes.
// - Paths which require fetching will be filtered based on AllowTypes/DenyTypes. If multiple
// file types are specified, each type will be considered individually, and a union of all paths
// which can accommodate each file type will be returned.
StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]storiface.SectorStorageInfo, error) //perm:admin
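//
// Sketch: locate the sealed copy of a sector, allowing remote fetch candidates
// (`sid` and `ssize` assumed known from context):
//
//	infos, err := miner.StorageFindSector(ctx, sid, storiface.FTSealed, ssize, true)
//	if err != nil {
//		return err
//	}
//	// Entries with local copies are listed before fetch candidates.
//	_ = infos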

// StorageBestAlloc returns a list of paths where sector files of the specified type can be allocated, ordered by preference.
// Paths with more weight and a higher percentage of free space are preferred.
// Note: This method doesn't filter paths based on AllowTypes/DenyTypes.
StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]storiface.StorageInfo, error) //perm:admin
StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin
StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin
StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error) //perm:admin
StorageGetLocks(ctx context.Context) (storiface.SectorLocks, error) //perm:admin

StorageLocal(ctx context.Context) (map[storiface.ID]string, error) //perm:admin
StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) //perm:admin
StorageAuthVerify(ctx context.Context, token string) ([]auth.Permission, error) //perm:read
StorageAddLocal(ctx context.Context, path string) error //perm:admin
StorageDetachLocal(ctx context.Context, path string) error //perm:admin
StorageRedeclareLocal(ctx context.Context, id *storiface.ID, dropMissing bool) error //perm:admin
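//
// Sketch: attach a new local path and re-scan local storage; the path is
// illustrative, and passing a nil ID is assumed to target all local paths:
//
//	if err := miner.StorageAddLocal(ctx, "/mnt/seal2"); err != nil {
//		return err
//	}
//	// Redeclare sectors in all local paths, dropping entries for missing files.
//	err = miner.StorageRedeclareLocal(ctx, nil, true)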

MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write
MarketListDeals(ctx context.Context) ([]*MarketDeal, error) //perm:read
MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) //perm:read
MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) //perm:read
MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) //perm:read
MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error //perm:admin
MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) //perm:read
MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error //perm:admin
MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) //perm:read
MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write
MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write
// MarketDataTransferDiagnostics generates debugging information about current data transfers over graphsync
MarketDataTransferDiagnostics(ctx context.Context, p peer.ID) (*TransferDiagnostics, error) //perm:write
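//
// Sketch: stream data-transfer updates until the context is cancelled:
//
//	updates, err := miner.MarketDataTransferUpdates(ctx)
//	if err != nil {
//		return err
//	}
//	for ch := range updates {
//		fmt.Println(ch.TransferID, ch.Status)
//	}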

// MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write

// MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
MarketPendingDeals(ctx context.Context) (PendingDealInfo, error) //perm:write
MarketPublishPendingDeals(ctx context.Context) error //perm:admin
MarketRetryPublishDeal(ctx context.Context, propcid cid.Cid) error //perm:admin

// DagstoreListShards returns information about all shards known to the
// DAG store. Only available on nodes running the markets subsystem.
DagstoreListShards(ctx context.Context) ([]DagstoreShardInfo, error) //perm:read
// DagstoreInitializeShard initializes an uninitialized shard.
//
// Initialization consists of fetching the shard's data (deal payload) from
// the storage subsystem, generating an index, and persisting the index
// to facilitate later retrievals, and/or to publish to external sources.
//
// This operation is intended to complement the initial migration. The
// migration registers a shard for every unique piece CID, with lazy
// initialization. Thus, shards are not initialized immediately to avoid
// IO activity competing with proving. Instead, shards are initialized
// when first accessed. This method forces the initialization of a shard by
// accessing it and immediately releasing it. This is useful to warm up the
// cache to facilitate subsequent retrievals, and to generate the indexes
// to publish them externally.
//
// This operation fails if the shard is not in ShardStateNew state.
// It blocks until initialization finishes.
DagstoreInitializeShard(ctx context.Context, key string) error //perm:write
// DagstoreRecoverShard attempts to recover a failed shard.
//
// This operation fails if the shard is not in ShardStateErrored state.
// It blocks until recovery finishes. If recovery failed, it returns the
// error.
DagstoreRecoverShard(ctx context.Context, key string) error //perm:write
// DagstoreInitializeAll initializes all uninitialized shards in bulk,
// according to the policy passed in the parameters.
//
// It is recommended to set a maximum concurrency to avoid extreme
// IO pressure if the storage subsystem has a large amount of deals.
//
// It returns a stream of events to report progress.
DagstoreInitializeAll(ctx context.Context, params DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) //perm:write
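//
// Sketch: initialize every uninitialized shard with bounded concurrency and
// report progress (parameter values illustrative):
//
//	evts, err := miner.DagstoreInitializeAll(ctx, DagstoreInitializeAllParams{
//		MaxConcurrency: 4,
//		IncludeSealed:  false,
//	})
//	if err != nil {
//		return err
//	}
//	for e := range evts {
//		fmt.Printf("%s %s (%d/%d)\n", e.Event, e.Key, e.Current, e.Total)
//	}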
// DagstoreGC runs garbage collection on the DAG store.
DagstoreGC(ctx context.Context) ([]DagstoreShardResult, error) //perm:admin

// DagstoreRegisterShard manually registers a shard with the dagstore for the given pieceCID
DagstoreRegisterShard(ctx context.Context, key string) error //perm:admin
// IndexerAnnounceDeal informs indexer nodes that a new deal was received,
// so they can download its index
IndexerAnnounceDeal(ctx context.Context, proposalCid cid.Cid) error //perm:admin

// IndexerAnnounceAllDeals informs the indexer nodes about all active deals.
IndexerAnnounceAllDeals(ctx context.Context) error //perm:admin
// DagstoreLookupPieces returns information about shards that contain the given CID.
DagstoreLookupPieces(ctx context.Context, cid cid.Cid) ([]DagstoreShardInfo, error) //perm:admin

// RuntimeSubsystems returns the subsystems that are enabled
// in this instance.
RuntimeSubsystems(ctx context.Context) (MinerSubsystems, error) //perm:read
DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error //perm:admin
DealsList(ctx context.Context) ([]*MarketDeal, error) //perm:admin
DealsConsiderOnlineStorageDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderOnlineStorageDeals(context.Context, bool) error //perm:admin
DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error //perm:admin
DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error) //perm:admin
DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error //perm:admin
DealsConsiderOfflineStorageDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderOfflineStorageDeals(context.Context, bool) error //perm:admin
DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error //perm:admin
DealsConsiderVerifiedStorageDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderVerifiedStorageDeals(context.Context, bool) error //perm:admin
DealsConsiderUnverifiedStorageDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderUnverifiedStorageDeals(context.Context, bool) error //perm:admin
PiecesListPieces(ctx context.Context) ([]cid.Cid, error) //perm:read
PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) //perm:read
PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) //perm:read
PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) //perm:read

// CreateBackup creates a node backup under the specified file name. The
// method requires that the lotus-miner is running with the
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
// the path specified when calling CreateBackup is within the base path
CreateBackup(ctx context.Context, fpath string) error //perm:admin
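//
// Sketch (assumes the miner was started with LOTUS_BACKUP_BASE_PATH=/backups;
// the file name is illustrative):
//
//	err := miner.CreateBackup(ctx, "/backups/miner-20220906.bak")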

CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin

ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read
// RecoverFault can be used to declare recoveries manually. It sends messages
// to the miner actor with details of recovered sectors and returns the CIDs of the messages. It honors the
// maxPartitionsPerRecoveryMessage from the config
RecoverFault(ctx context.Context, sectors []abi.SectorNumber) ([]cid.Cid, error) //perm:admin
}

var _ storiface.WorkerReturn = *new(StorageMiner)

type SealRes struct {
Err string
GoErr error `json:"-"`

Proof []byte
}

type SectorLog struct {
Kind string
Timestamp uint64
Trace string
Message string
}

type SectorPiece struct {
Piece abi.PieceInfo
DealInfo *PieceDealInfo // nil for pieces which do not appear in deals (e.g. filler pieces)
}

type SectorInfo struct {
SectorID abi.SectorNumber
State SectorState
CommD *cid.Cid
CommR *cid.Cid
Proof []byte
Deals []abi.DealID
Pieces []SectorPiece
Ticket SealTicket
Seed SealSeed
PreCommitMsg *cid.Cid
CommitMsg *cid.Cid
Retries uint64
ToUpgrade bool
ReplicaUpdateMessage *cid.Cid

LastErr string
Log []SectorLog

// On Chain Info
SealProof abi.RegisteredSealProof // The seal proof type implies the PoSt proof/s
Activation abi.ChainEpoch // Epoch during which the sector proof was accepted
Expiration abi.ChainEpoch // Epoch during which the sector expires
DealWeight abi.DealWeight // Integral of active deals over sector lifetime
VerifiedDealWeight abi.DealWeight // Integral of active verified deals over sector lifetime
InitialPledge abi.TokenAmount // Pledge collected to commit this sector

// Expiration Info
OnTime abi.ChainEpoch
// non-zero if sector is faulty, epoch at which it will be permanently
// removed if it doesn't recover
Early abi.ChainEpoch
}

type SealedRef struct {
SectorID abi.SectorNumber
Offset abi.PaddedPieceSize
Size abi.UnpaddedPieceSize
}

type SealedRefs struct {
Refs []SealedRef
}

type SealTicket struct {
Value abi.SealRandomness
Epoch abi.ChainEpoch
}

type SealSeed struct {
Value abi.InteractiveSealRandomness
Epoch abi.ChainEpoch
}

func (st *SealTicket) Equals(ost *SealTicket) bool {
return bytes.Equal(st.Value, ost.Value) && st.Epoch == ost.Epoch
}

func (st *SealSeed) Equals(ost *SealSeed) bool {
return bytes.Equal(st.Value, ost.Value) && st.Epoch == ost.Epoch
}

type SectorState string

type AddrUse int

const (
PreCommitAddr AddrUse = iota
CommitAddr
DealPublishAddr
PoStAddr
TerminateSectorsAddr
)

type AddressConfig struct {
PreCommitControl []address.Address
CommitControl []address.Address
TerminateControl []address.Address
DealPublishControl []address.Address
DisableOwnerFallback bool
DisableWorkerFallback bool
}

// PendingDealInfo has info about pending deals and when they are due to be
// published
type PendingDealInfo struct {
Deals []market.ClientDealProposal
PublishPeriodStart time.Time
PublishPeriod time.Duration
}

type SectorOffset struct {
Sector abi.SectorNumber
Offset abi.PaddedPieceSize
}

// PieceDealInfo is a tuple of deal identity and its schedule
type PieceDealInfo struct {
PublishCid *cid.Cid
DealID abi.DealID
DealProposal *market.DealProposal
DealSchedule DealSchedule
KeepUnsealed bool
}

// DealSchedule communicates the time interval of a storage deal. The deal must
// appear in a sealed (proven) sector no later than StartEpoch, otherwise it
// is invalid.
type DealSchedule struct {
StartEpoch abi.ChainEpoch
EndEpoch abi.ChainEpoch
}
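//
// For example, a deal with DealSchedule{StartEpoch: 1000, EndEpoch: 2000} must
// be sealed into a proven sector at or before epoch 1000, or it becomes invalid.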

// DagstoreShardInfo is the serialized form of dagstore.DagstoreShardInfo that
// we expose through JSON-RPC to avoid clients having to depend on the
// dagstore lib.
type DagstoreShardInfo struct {
Key string
State string
Error string
}

// DagstoreShardResult enumerates results per shard.
type DagstoreShardResult struct {
Key string
Success bool
Error string
}

type DagstoreInitializeAllParams struct {
MaxConcurrency int
IncludeSealed bool
}

// DagstoreInitializeAllEvent represents an initialization event.
type DagstoreInitializeAllEvent struct {
Key string
Event string // "start", "end"
Success bool
Error string
Total int
Current int
}

type NumAssignerMeta struct {
Reserved bitfield.BitField
Allocated bitfield.BitField

// ChainAllocated+Reserved+Allocated
InUse bitfield.BitField

Next abi.SectorNumber
}