cleanup: miner: remove markets and deal-making from Lotus Miner (#12005)
* remove client CLI
* remove markets CLI from miner
* remove markets from all CLI
* remove client API
* update go mod
* remove EnableMarkets flag
* remove market subsystem
* remove dagstore
* remove index provider
* remove graphsync and data-transfer
* remove markets
* go mod tidy
* fix cbor gen deps
* remove deal making from config
* remove eol alert
* go mod tidy
* changes as per review
* make gen
* changes as per review
* merge master
* remove libp2p from config
* miner does not have libp2p conn in api test
parent 469960ce0e
commit e3deda0b2b
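Two of the bullets above ("remove EnableMarkets flag", "remove deal making from config") change the miner's on-disk configuration surface. A minimal sketch of the before/after shape of the subsystem config, assuming the pre-change field names from node/config (reconstructed from memory of the Lotus config, not from this diff):

    // Before this commit (sketch): lotus-miner config carried a markets toggle.
    type MinerSubsystemConfig struct {
        EnableMining        bool
        EnableSealing       bool
        EnableSectorStorage bool
        EnableMarkets       bool // deal-making switch, deleted by this commit
    }

    // After: no markets switch remains; storage/retrieval deal-making is expected
    // to run in an external market node (e.g. Boost) rather than in lotus-miner.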
.github/workflows/check.yml (vendored) | 1 changed line

@@ -44,7 +44,6 @@ jobs:
      - uses: ./.github/actions/install-go
      - run: make deps lotus
      - run: go install golang.org/x/tools/cmd/goimports
      - run: go install github.com/hannahhoward/cbor-gen-for
      - run: make gen
      - run: git diff --exit-code
      - run: make docsgen-cli
api/api_full.go | 143 changed lines

@@ -12,9 +12,6 @@ import (
    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-bitfield"
    datatransfer "github.com/filecoin-project/go-data-transfer/v2"
    "github.com/filecoin-project/go-fil-markets/retrievalmarket"
    "github.com/filecoin-project/go-fil-markets/storagemarket"
    "github.com/filecoin-project/go-jsonrpc"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/big"
@@ -33,7 +30,6 @@ import (
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/chain/types/ethtypes"
    "github.com/filecoin-project/lotus/node/modules/dtypes"
    "github.com/filecoin-project/lotus/node/repo/imports"
)

//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_full.go -package=mocks . FullNode

@@ -869,17 +865,6 @@ type EthSubscriber interface {
    EthSubscription(ctx context.Context, r jsonrpc.RawParams) error // rpc_method:eth_subscription notify:true
}

type StorageAsk struct {
    Response *storagemarket.StorageAsk

    DealProtocols []string
}

type FileRef struct {
    Path  string
    IsCAR bool
}

type MinerSectors struct {
    // Live sectors that should be proven.
    Live uint64
@@ -889,55 +874,6 @@ type MinerSectors struct {
    Faulty uint64
}

type ImportRes struct {
    Root     cid.Cid
    ImportID imports.ID
}

type Import struct {
    Key imports.ID
    Err string

    Root *cid.Cid

    // Source is the provenance of the import, e.g. "import", "unknown", else.
    // Currently useless but may be used in the future.
    Source string

    // FilePath is the path of the original file. It is important that the file
    // is retained at this path, because it will be referenced during
    // the transfer (when we do the UnixFS chunking, we don't duplicate the
    // leaves, but rather point to chunks of the original data through
    // positional references).
    FilePath string

    // CARPath is the path of the CAR file containing the DAG for this import.
    CARPath string
}

type DealInfo struct {
    ProposalCid cid.Cid
    State       storagemarket.StorageDealStatus
    Message     string // more information about deal state, particularly errors
    DealStages  *storagemarket.DealStages
    Provider    address.Address

    DataRef  *storagemarket.DataRef
    PieceCID cid.Cid
    Size     uint64

    PricePerEpoch types.BigInt
    Duration      uint64

    DealID abi.DealID

    CreationTime time.Time
    Verified     bool

    TransferChannelID *datatransfer.ChannelID
    DataTransfer      *DataTransferChannel
}

type MsgLookup struct {
    Message cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed
    Receipt types.MessageReceipt
@@ -1059,38 +995,6 @@ type MinerPower struct {
    HasMinPower bool
}

type QueryOffer struct {
    Err string

    Root  cid.Cid
    Piece *cid.Cid

    Size                    uint64
    MinPrice                types.BigInt
    UnsealPrice             types.BigInt
    PricePerByte            abi.TokenAmount
    PaymentInterval         uint64
    PaymentIntervalIncrease uint64
    Miner                   address.Address
    MinerPeer               retrievalmarket.RetrievalPeer
}

func (o *QueryOffer) Order(client address.Address) RetrievalOrder {
    return RetrievalOrder{
        Root:                    o.Root,
        Piece:                   o.Piece,
        Size:                    o.Size,
        Total:                   o.MinPrice,
        UnsealPrice:             o.UnsealPrice,
        PaymentInterval:         o.PaymentInterval,
        PaymentIntervalIncrease: o.PaymentIntervalIncrease,
        Client:                  client,

        Miner:     o.Miner,
        MinerPeer: &o.MinerPeer,
    }
}
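The Order method above was the client-side bridge from a retrieval quote (QueryOffer) to an executable RetrievalOrder. A minimal usage sketch; the offer value and client wallet are hypothetical stand-ins for what the removed client API used to return:

    var offer api.QueryOffer // previously returned by the client retrieval query API

    clientAddr, err := address.NewFromString("f1...") // hypothetical client wallet
    if err != nil {
        return err
    }

    // Copies the offer's price and payment-interval terms and pins the miner peer.
    order := offer.Order(clientAddr)
    _ = order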
type MarketBalance struct {
    Escrow big.Int
    Locked big.Int
@@ -1145,25 +1049,6 @@ type MarketDeal struct {
    State MarketDealState
}

type RetrievalOrder struct {
    Root         cid.Cid
    Piece        *cid.Cid
    DataSelector *Selector

    // todo: Size/Total are only used for calculating price per byte; we should let users just pass that
    Size  uint64
    Total types.BigInt

    UnsealPrice             types.BigInt
    PaymentInterval         uint64
    PaymentIntervalIncrease uint64
    Client                  address.Address
    Miner                   address.Address
    MinerPeer               *retrievalmarket.RetrievalPeer

    RemoteStore *RemoteStoreID `json:"RemoteStore,omitempty"`
}

type RemoteStoreID = uuid.UUID

type InvocResult struct {
@@ -1181,34 +1066,6 @@ type MethodCall struct {
    Error string
}

type StartDealParams struct {
    Data               *storagemarket.DataRef
    Wallet             address.Address
    Miner              address.Address
    EpochPrice         types.BigInt
    MinBlocksDuration  uint64
    ProviderCollateral big.Int
    DealStartEpoch     abi.ChainEpoch
    FastRetrieval      bool
    VerifiedDeal       bool
}

func (s *StartDealParams) UnmarshalJSON(raw []byte) (err error) {
    type sdpAlias StartDealParams

    sdp := sdpAlias{
        FastRetrieval: true,
    }

    if err := json.Unmarshal(raw, &sdp); err != nil {
        return err
    }

    *s = StartDealParams(sdp)

    return nil
}
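UnmarshalJSON above relies on the type-alias trick to default FastRetrieval to true when the key is absent: the alias type carries the same fields but none of the methods, so json.Unmarshal cannot recurse back into UnmarshalJSON. A self-contained sketch of the same pattern with a hypothetical type:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type Params struct {
        FastRetrieval bool
    }

    // UnmarshalJSON defaults FastRetrieval to true when the key is omitted.
    func (p *Params) UnmarshalJSON(raw []byte) error {
        type alias Params // alias has the fields but no methods, so no recursion
        v := alias{FastRetrieval: true}
        if err := json.Unmarshal(raw, &v); err != nil {
            return err
        }
        *p = Params(v)
        return nil
    }

    func main() {
        var a, b Params
        _ = json.Unmarshal([]byte(`{}`), &a)                      // key omitted
        _ = json.Unmarshal([]byte(`{"FastRetrieval":false}`), &b) // explicit false
        fmt.Println(a.FastRetrieval, b.FastRetrieval)             // true false
    }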
type IpldObject struct {
    Cid cid.Cid
    Obj interface{}
api/api_storage.go

@@ -7,14 +7,9 @@ import (
    "github.com/google/uuid"
    "github.com/ipfs/go-cid"
    "github.com/libp2p/go-libp2p/core/peer"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-bitfield"
    datatransfer "github.com/filecoin-project/go-data-transfer/v2"
    "github.com/filecoin-project/go-fil-markets/piecestore"
    "github.com/filecoin-project/go-fil-markets/retrievalmarket"
    "github.com/filecoin-project/go-fil-markets/storagemarket"
    "github.com/filecoin-project/go-jsonrpc/auth"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/big"
@@ -215,110 +210,12 @@ type StorageMiner interface {
    StorageDetachLocal(ctx context.Context, path string) error                           //perm:admin
    StorageRedeclareLocal(ctx context.Context, id *storiface.ID, dropMissing bool) error //perm:admin

    MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write
    MarketListDeals(ctx context.Context) ([]*MarketDeal, error)                   //perm:read

    // MarketListRetrievalDeals is deprecated, returns empty list
    MarketListRetrievalDeals(ctx context.Context) ([]struct{}, error)                 //perm:read
    MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) //perm:read
    MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) //perm:read
    MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error //perm:admin
    MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error)         //perm:read
    MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error        //perm:admin
    MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error)           //perm:read
    MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error)        //perm:write
    MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write
    // MarketDataTransferDiagnostics generates debugging information about current data transfers over graphsync
    MarketDataTransferDiagnostics(ctx context.Context, p peer.ID) (*TransferDiagnostics, error) //perm:write
    // MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
    MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
    // MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
    MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
    MarketPendingDeals(ctx context.Context) (PendingDealInfo, error)   //perm:write
    MarketPublishPendingDeals(ctx context.Context) error               //perm:admin
    MarketRetryPublishDeal(ctx context.Context, propcid cid.Cid) error //perm:admin

    // DagstoreListShards returns information about all shards known to the
    // DAG store. Only available on nodes running the markets subsystem.
    DagstoreListShards(ctx context.Context) ([]DagstoreShardInfo, error) //perm:read

    // DagstoreInitializeShard initializes an uninitialized shard.
    //
    // Initialization consists of fetching the shard's data (deal payload) from
    // the storage subsystem, generating an index, and persisting the index
    // to facilitate later retrievals, and/or to publish to external sources.
    //
    // This operation is intended to complement the initial migration. The
    // migration registers a shard for every unique piece CID, with lazy
    // initialization. Thus, shards are not initialized immediately to avoid
    // IO activity competing with proving. Instead, shards are initialized
    // when first accessed. This method forces the initialization of a shard by
    // accessing it and immediately releasing it. This is useful to warm up the
    // cache to facilitate subsequent retrievals, and to generate the indexes
    // to publish them externally.
    //
    // This operation fails if the shard is not in ShardStateNew state.
    // It blocks until initialization finishes.
    DagstoreInitializeShard(ctx context.Context, key string) error //perm:write

    // DagstoreRecoverShard attempts to recover a failed shard.
    //
    // This operation fails if the shard is not in ShardStateErrored state.
    // It blocks until recovery finishes. If recovery failed, it returns the
    // error.
    DagstoreRecoverShard(ctx context.Context, key string) error //perm:write

    // DagstoreInitializeAll initializes all uninitialized shards in bulk,
    // according to the policy passed in the parameters.
    //
    // It is recommended to set a maximum concurrency to avoid extreme
    // IO pressure if the storage subsystem has a large amount of deals.
    //
    // It returns a stream of events to report progress.
    DagstoreInitializeAll(ctx context.Context, params DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) //perm:write

    // DagstoreGC runs garbage collection on the DAG store.
    DagstoreGC(ctx context.Context) ([]DagstoreShardResult, error) //perm:admin

    // DagstoreRegisterShard manually registers a shard with the dagstore for the given pieceCID
    DagstoreRegisterShard(ctx context.Context, key string) error //perm:admin

    // IndexerAnnounceDeal informs indexer nodes that a new deal was received,
    // so they can download its index
    IndexerAnnounceDeal(ctx context.Context, proposalCid cid.Cid) error //perm:admin

    // IndexerAnnounceAllDeals informs the indexer nodes about all active deals.
    IndexerAnnounceAllDeals(ctx context.Context) error //perm:admin

    // DagstoreLookupPieces returns information about shards that contain the given CID.
    DagstoreLookupPieces(ctx context.Context, cid cid.Cid) ([]DagstoreShardInfo, error) //perm:admin
    MarketListDeals(ctx context.Context) ([]*MarketDeal, error)                         //perm:read

    // RuntimeSubsystems returns the subsystems that are enabled
    // in this instance.
    RuntimeSubsystems(ctx context.Context) (MinerSubsystems, error) //perm:read

    DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error //perm:admin
    DealsList(ctx context.Context) ([]*MarketDeal, error)                        //perm:admin
    DealsConsiderOnlineStorageDeals(context.Context) (bool, error)               //perm:admin
    DealsSetConsiderOnlineStorageDeals(context.Context, bool) error              //perm:admin
    DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error)             //perm:admin
    DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error            //perm:admin
    DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error)                   //perm:admin
    DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error                  //perm:admin
    DealsConsiderOfflineStorageDeals(context.Context) (bool, error)              //perm:admin
    DealsSetConsiderOfflineStorageDeals(context.Context, bool) error             //perm:admin
    DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error)            //perm:admin
    DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error           //perm:admin
    DealsConsiderVerifiedStorageDeals(context.Context) (bool, error)             //perm:admin
    DealsSetConsiderVerifiedStorageDeals(context.Context, bool) error            //perm:admin
    DealsConsiderUnverifiedStorageDeals(context.Context) (bool, error)           //perm:admin
    DealsSetConsiderUnverifiedStorageDeals(context.Context, bool) error          //perm:admin

    PiecesListPieces(ctx context.Context) ([]cid.Cid, error)                                 //perm:read
    PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error)                               //perm:read
    PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) //perm:read
    PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error)   //perm:read

    // CreateBackup creates node backup under the specified file name. The
    // method requires that the lotus-miner is running with the
    // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
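Before this removal, the Dagstore* methods were the operator surface for the shard index described in the comments above. A sketch of how DagstoreInitializeAll's event stream was typically drained, assuming minerAPI is an api.StorageMiner RPC client (variable names hypothetical):

    events, err := minerAPI.DagstoreInitializeAll(ctx, api.DagstoreInitializeAllParams{
        MaxConcurrency: 4,     // bound concurrency to limit IO pressure, per the doc comment
        IncludeSealed:  false, // skip shards whose payload would need unsealing
    })
    if err != nil {
        return err
    }
    for ev := range events { // one "start"/"end" event per shard; assumed to close when the bulk run finishes
        fmt.Printf("[%d/%d] shard %s: %s success=%v %s\n",
            ev.Current, ev.Total, ev.Key, ev.Event, ev.Success, ev.Error)
    }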
@@ -471,37 +368,6 @@ type SectorOffset struct {
    Offset abi.PaddedPieceSize
}

// DagstoreShardInfo is the serialized form of dagstore.DagstoreShardInfo that
// we expose through JSON-RPC to avoid clients having to depend on the
// dagstore lib.
type DagstoreShardInfo struct {
    Key   string
    State string
    Error string
}

// DagstoreShardResult enumerates results per shard.
type DagstoreShardResult struct {
    Key     string
    Success bool
    Error   string
}

type DagstoreInitializeAllParams struct {
    MaxConcurrency int
    IncludeSealed  bool
}

// DagstoreInitializeAllEvent represents an initialization event.
type DagstoreInitializeAllEvent struct {
    Key     string
    Event   string // "start", "end"
    Success bool
    Error   string
    Total   int
    Current int
}

type NumAssignerMeta struct {
    Reserved  bitfield.BitField
    Allocated bitfield.BitField
api/docgen/docgen.go

@@ -16,7 +16,6 @@ import (
    "github.com/google/uuid"
    blocks "github.com/ipfs/go-block-format"
    "github.com/ipfs/go-cid"
    "github.com/ipfs/go-graphsync"
    textselector "github.com/ipld/go-ipld-selector-text-lite"
    pubsub "github.com/libp2p/go-libp2p-pubsub"
    "github.com/libp2p/go-libp2p/core/metrics"
@@ -27,9 +26,6 @@ import (
    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-bitfield"
    datatransfer "github.com/filecoin-project/go-data-transfer/v2"
    "github.com/filecoin-project/go-fil-markets/filestore"
    "github.com/filecoin-project/go-fil-markets/retrievalmarket"
    "github.com/filecoin-project/go-jsonrpc/auth"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
@@ -44,7 +40,6 @@ import (
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/chain/types/ethtypes"
    "github.com/filecoin-project/lotus/node/modules/dtypes"
    "github.com/filecoin-project/lotus/node/repo/imports"
    sealing "github.com/filecoin-project/lotus/storage/pipeline"
    "github.com/filecoin-project/lotus/storage/sealer/sealtasks"
    "github.com/filecoin-project/lotus/storage/sealer/storiface"
@@ -96,10 +91,8 @@ func init() {
    addExample(pid)
    addExample(&pid)

    storeIDExample := imports.ID(50)
    textSelExample := textselector.Expression("Links/21/Hash/Links/42/Hash")
    apiSelExample := api.Selector("Links/21/Hash/Links/42/Hash")
    clientEvent := retrievalmarket.ClientEventDealAccepted

    block := blocks.Block(&blocks.BasicBlock{})
    ExampleValues[reflect.TypeOf(&block).Elem()] = block
@@ -130,15 +123,7 @@ func init() {
    addExample(api.FullAPIVersion1)
    addExample(api.PCHInbound)
    addExample(time.Minute)
    addExample(graphsync.NewRequestID())
    addExample(datatransfer.TransferID(3))
    addExample(datatransfer.Ongoing)
    addExample(storeIDExample)
    addExample(&storeIDExample)
    addExample(clientEvent)
    addExample(&clientEvent)
    addExample(retrievalmarket.ClientEventDealAccepted)
    addExample(retrievalmarket.DealStatusNew)

    addExample(&textSelExample)
    addExample(&apiSelExample)
    addExample(network.ReachabilityPublic)
@@ -206,10 +191,9 @@ func init() {
    ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr

    // miner specific
    addExample(filestore.Path(".lotusminer/fstmp123"))

    si := uint64(12)
    addExample(&si)
    addExample(retrievalmarket.DealID(5))
    addExample(map[string]cid.Cid{})
    addExample(map[string][]api.SealedRef{
        "98000": {
@@ -313,17 +297,8 @@ func init() {
        api.SubsystemMining,
        api.SubsystemSealing,
        api.SubsystemSectorStorage,
        api.SubsystemMarkets,
    })
    addExample(api.DagstoreShardResult{
        Key:   "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq",
        Error: "<error>",
    })
    addExample(api.DagstoreShardInfo{
        Key:   "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq",
        State: "ShardStateAvailable",
        Error: "<error>",
    })

    addExample(storiface.ResourceTable)
    addExample(network.ScopeStat{
        Memory: 123,
api/miner_subsystems.go

@@ -13,9 +13,6 @@ const (
    // SubsystemUnknown is a placeholder for the zero value. It should never
    // be used.
    SubsystemUnknown MinerSubsystem = iota
    // SubsystemMarkets signifies the storage and retrieval
    // deal-making subsystem.
    SubsystemMarkets
    // SubsystemMining signifies the mining subsystem.
    SubsystemMining
    // SubsystemSealing signifies the sealing subsystem.
@@ -26,7 +23,6 @@ const (

var MinerSubsystemToString = map[MinerSubsystem]string{
    SubsystemUnknown:       "Unknown",
    SubsystemMarkets:       "Markets",
    SubsystemMining:        "Mining",
    SubsystemSealing:       "Sealing",
    SubsystemSectorStorage: "SectorStorage",
@@ -34,7 +30,6 @@ var MinerSubsystemToString = map[MinerSubsystem]string{

var MinerSubsystemToID = map[string]MinerSubsystem{
    "Unknown":       SubsystemUnknown,
    "Markets":       SubsystemMarkets,
    "Mining":        SubsystemMining,
    "Sealing":       SubsystemSealing,
    "SectorStorage": SubsystemSectorStorage,
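RuntimeSubsystems (declared in api_storage.go above) survives, but SubsystemMarkets leaves the enum, so "Markets" can no longer appear in its result. A sketch of gating client behavior on the reported subsystems; client setup is elided and MinerSubsystems is assumed to be a slice of MinerSubsystem, as in the Lotus source:

    subsystems, err := minerAPI.RuntimeSubsystems(ctx)
    if err != nil {
        return err
    }
    for _, s := range subsystems {
        // After this commit the possible values are Mining, Sealing and
        // SectorStorage; "Markets" is gone from MinerSubsystemToString.
        fmt.Println(api.MinerSubsystemToString[s])
    }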
api/proxy_gen.go | 589 changed lines

@@ -18,10 +18,6 @@ import (
    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-bitfield"
    datatransfer "github.com/filecoin-project/go-data-transfer/v2"
    "github.com/filecoin-project/go-fil-markets/piecestore"
    "github.com/filecoin-project/go-fil-markets/retrievalmarket"
    "github.com/filecoin-project/go-fil-markets/storagemarket"
    "github.com/filecoin-project/go-jsonrpc"
    "github.com/filecoin-project/go-jsonrpc/auth"
    "github.com/filecoin-project/go-state-types/abi"
@@ -891,100 +887,10 @@ type StorageMinerMethods struct {
    CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`

    DagstoreGC func(p0 context.Context) ([]DagstoreShardResult, error) `perm:"admin"`

    DagstoreInitializeAll func(p0 context.Context, p1 DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) `perm:"write"`

    DagstoreInitializeShard func(p0 context.Context, p1 string) error `perm:"write"`

    DagstoreListShards func(p0 context.Context) ([]DagstoreShardInfo, error) `perm:"read"`

    DagstoreLookupPieces func(p0 context.Context, p1 cid.Cid) ([]DagstoreShardInfo, error) `perm:"admin"`

    DagstoreRecoverShard func(p0 context.Context, p1 string) error `perm:"write"`

    DagstoreRegisterShard func(p0 context.Context, p1 string) error `perm:"admin"`

    DealsConsiderOfflineRetrievalDeals func(p0 context.Context) (bool, error) `perm:"admin"`

    DealsConsiderOfflineStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"`

    DealsConsiderOnlineRetrievalDeals func(p0 context.Context) (bool, error) `perm:"admin"`

    DealsConsiderOnlineStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"`

    DealsConsiderUnverifiedStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"`

    DealsConsiderVerifiedStorageDeals func(p0 context.Context) (bool, error) `perm:"admin"`

    DealsImportData func(p0 context.Context, p1 cid.Cid, p2 string) error `perm:"admin"`

    DealsList func(p0 context.Context) ([]*MarketDeal, error) `perm:"admin"`

    DealsPieceCidBlocklist func(p0 context.Context) ([]cid.Cid, error) `perm:"admin"`

    DealsSetConsiderOfflineRetrievalDeals func(p0 context.Context, p1 bool) error `perm:"admin"`

    DealsSetConsiderOfflineStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"`

    DealsSetConsiderOnlineRetrievalDeals func(p0 context.Context, p1 bool) error `perm:"admin"`

    DealsSetConsiderOnlineStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"`

    DealsSetConsiderUnverifiedStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"`

    DealsSetConsiderVerifiedStorageDeals func(p0 context.Context, p1 bool) error `perm:"admin"`

    DealsSetPieceCidBlocklist func(p0 context.Context, p1 []cid.Cid) error `perm:"admin"`

    IndexerAnnounceAllDeals func(p0 context.Context) error `perm:"admin"`

    IndexerAnnounceDeal func(p0 context.Context, p1 cid.Cid) error `perm:"admin"`

    MarketCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`

    MarketDataTransferDiagnostics func(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) `perm:"write"`

    MarketDataTransferUpdates func(p0 context.Context) (<-chan DataTransferChannel, error) `perm:"write"`

    MarketGetAsk func(p0 context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"`

    MarketGetDealUpdates func(p0 context.Context) (<-chan storagemarket.MinerDeal, error) `perm:"read"`

    MarketGetRetrievalAsk func(p0 context.Context) (*retrievalmarket.Ask, error) `perm:"read"`

    MarketImportDealData func(p0 context.Context, p1 cid.Cid, p2 string) error `perm:"write"`

    MarketListDataTransfers func(p0 context.Context) ([]DataTransferChannel, error) `perm:"write"`

    MarketListDeals func(p0 context.Context) ([]*MarketDeal, error) `perm:"read"`

    MarketListIncompleteDeals func(p0 context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"`

    MarketListRetrievalDeals func(p0 context.Context) ([]struct{}, error) `perm:"read"`

    MarketPendingDeals func(p0 context.Context) (PendingDealInfo, error) `perm:"write"`

    MarketPublishPendingDeals func(p0 context.Context) error `perm:"admin"`

    MarketRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`

    MarketRetryPublishDeal func(p0 context.Context, p1 cid.Cid) error `perm:"admin"`

    MarketSetAsk func(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error `perm:"admin"`

    MarketSetRetrievalAsk func(p0 context.Context, p1 *retrievalmarket.Ask) error `perm:"admin"`

    MiningBase func(p0 context.Context) (*types.TipSet, error) `perm:"read"`

    PiecesGetCIDInfo func(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) `perm:"read"`

    PiecesGetPieceInfo func(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) `perm:"read"`

    PiecesListCidInfos func(p0 context.Context) ([]cid.Cid, error) `perm:"read"`

    PiecesListPieces func(p0 context.Context) ([]cid.Cid, error) `perm:"read"`

    PledgeSector func(p0 context.Context) (abi.SectorID, error) `perm:"write"`

    RecoverFault func(p0 context.Context, p1 []abi.SectorNumber) ([]cid.Cid, error) `perm:"admin"`
@@ -5353,369 +5259,6 @@ func (s *StorageMinerStub) CreateBackup(p0 context.Context, p1 string) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) DagstoreGC(p0 context.Context) ([]DagstoreShardResult, error) {
    if s.Internal.DagstoreGC == nil {
        return *new([]DagstoreShardResult), ErrNotSupported
    }
    return s.Internal.DagstoreGC(p0)
}

func (s *StorageMinerStub) DagstoreGC(p0 context.Context) ([]DagstoreShardResult, error) {
    return *new([]DagstoreShardResult), ErrNotSupported
}
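Each removed API method deletes the same generated triple seen above: a function field on StorageMinerMethods, a StorageMinerStruct wrapper that nil-checks that field, and a StorageMinerStub that always fails. A minimal sketch of the code-gen pattern, using a hypothetical Ping method:

    package proxypattern

    import (
        "context"
        "errors"
    )

    var ErrNotSupported = errors.New("method not supported")

    type Methods struct {
        Ping func(ctx context.Context) (string, error) `perm:"read"`
    }

    type Struct struct{ Internal Methods }

    // The wrapper degrades gracefully when the remote side does not register
    // the method: the function field is simply left nil.
    func (s *Struct) Ping(ctx context.Context) (string, error) {
        if s.Internal.Ping == nil {
            return "", ErrNotSupported
        }
        return s.Internal.Ping(ctx)
    }

    // The stub variant stands in when a whole subsystem is compiled out.
    type Stub struct{}

    func (s *Stub) Ping(ctx context.Context) (string, error) {
        return "", ErrNotSupported
    }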
func (s *StorageMinerStruct) DagstoreInitializeAll(p0 context.Context, p1 DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) {
    if s.Internal.DagstoreInitializeAll == nil {
        return nil, ErrNotSupported
    }
    return s.Internal.DagstoreInitializeAll(p0, p1)
}

func (s *StorageMinerStub) DagstoreInitializeAll(p0 context.Context, p1 DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) {
    return nil, ErrNotSupported
}

func (s *StorageMinerStruct) DagstoreInitializeShard(p0 context.Context, p1 string) error {
    if s.Internal.DagstoreInitializeShard == nil {
        return ErrNotSupported
    }
    return s.Internal.DagstoreInitializeShard(p0, p1)
}

func (s *StorageMinerStub) DagstoreInitializeShard(p0 context.Context, p1 string) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) DagstoreListShards(p0 context.Context) ([]DagstoreShardInfo, error) {
    if s.Internal.DagstoreListShards == nil {
        return *new([]DagstoreShardInfo), ErrNotSupported
    }
    return s.Internal.DagstoreListShards(p0)
}

func (s *StorageMinerStub) DagstoreListShards(p0 context.Context) ([]DagstoreShardInfo, error) {
    return *new([]DagstoreShardInfo), ErrNotSupported
}

func (s *StorageMinerStruct) DagstoreLookupPieces(p0 context.Context, p1 cid.Cid) ([]DagstoreShardInfo, error) {
    if s.Internal.DagstoreLookupPieces == nil {
        return *new([]DagstoreShardInfo), ErrNotSupported
    }
    return s.Internal.DagstoreLookupPieces(p0, p1)
}

func (s *StorageMinerStub) DagstoreLookupPieces(p0 context.Context, p1 cid.Cid) ([]DagstoreShardInfo, error) {
    return *new([]DagstoreShardInfo), ErrNotSupported
}

func (s *StorageMinerStruct) DagstoreRecoverShard(p0 context.Context, p1 string) error {
    if s.Internal.DagstoreRecoverShard == nil {
        return ErrNotSupported
    }
    return s.Internal.DagstoreRecoverShard(p0, p1)
}

func (s *StorageMinerStub) DagstoreRecoverShard(p0 context.Context, p1 string) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) DagstoreRegisterShard(p0 context.Context, p1 string) error {
    if s.Internal.DagstoreRegisterShard == nil {
        return ErrNotSupported
    }
    return s.Internal.DagstoreRegisterShard(p0, p1)
}

func (s *StorageMinerStub) DagstoreRegisterShard(p0 context.Context, p1 string) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) DealsConsiderOfflineRetrievalDeals(p0 context.Context) (bool, error) {
    if s.Internal.DealsConsiderOfflineRetrievalDeals == nil {
        return false, ErrNotSupported
    }
    return s.Internal.DealsConsiderOfflineRetrievalDeals(p0)
}

func (s *StorageMinerStub) DealsConsiderOfflineRetrievalDeals(p0 context.Context) (bool, error) {
    return false, ErrNotSupported
}

func (s *StorageMinerStruct) DealsConsiderOfflineStorageDeals(p0 context.Context) (bool, error) {
    if s.Internal.DealsConsiderOfflineStorageDeals == nil {
        return false, ErrNotSupported
    }
    return s.Internal.DealsConsiderOfflineStorageDeals(p0)
}

func (s *StorageMinerStub) DealsConsiderOfflineStorageDeals(p0 context.Context) (bool, error) {
    return false, ErrNotSupported
}

func (s *StorageMinerStruct) DealsConsiderOnlineRetrievalDeals(p0 context.Context) (bool, error) {
    if s.Internal.DealsConsiderOnlineRetrievalDeals == nil {
        return false, ErrNotSupported
    }
    return s.Internal.DealsConsiderOnlineRetrievalDeals(p0)
}

func (s *StorageMinerStub) DealsConsiderOnlineRetrievalDeals(p0 context.Context) (bool, error) {
    return false, ErrNotSupported
}

func (s *StorageMinerStruct) DealsConsiderOnlineStorageDeals(p0 context.Context) (bool, error) {
    if s.Internal.DealsConsiderOnlineStorageDeals == nil {
        return false, ErrNotSupported
    }
    return s.Internal.DealsConsiderOnlineStorageDeals(p0)
}

func (s *StorageMinerStub) DealsConsiderOnlineStorageDeals(p0 context.Context) (bool, error) {
    return false, ErrNotSupported
}

func (s *StorageMinerStruct) DealsConsiderUnverifiedStorageDeals(p0 context.Context) (bool, error) {
    if s.Internal.DealsConsiderUnverifiedStorageDeals == nil {
        return false, ErrNotSupported
    }
    return s.Internal.DealsConsiderUnverifiedStorageDeals(p0)
}

func (s *StorageMinerStub) DealsConsiderUnverifiedStorageDeals(p0 context.Context) (bool, error) {
    return false, ErrNotSupported
}

func (s *StorageMinerStruct) DealsConsiderVerifiedStorageDeals(p0 context.Context) (bool, error) {
    if s.Internal.DealsConsiderVerifiedStorageDeals == nil {
        return false, ErrNotSupported
    }
    return s.Internal.DealsConsiderVerifiedStorageDeals(p0)
}

func (s *StorageMinerStub) DealsConsiderVerifiedStorageDeals(p0 context.Context) (bool, error) {
    return false, ErrNotSupported
}

func (s *StorageMinerStruct) DealsImportData(p0 context.Context, p1 cid.Cid, p2 string) error {
    if s.Internal.DealsImportData == nil {
        return ErrNotSupported
    }
    return s.Internal.DealsImportData(p0, p1, p2)
}

func (s *StorageMinerStub) DealsImportData(p0 context.Context, p1 cid.Cid, p2 string) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) DealsList(p0 context.Context) ([]*MarketDeal, error) {
    if s.Internal.DealsList == nil {
        return *new([]*MarketDeal), ErrNotSupported
    }
    return s.Internal.DealsList(p0)
}

func (s *StorageMinerStub) DealsList(p0 context.Context) ([]*MarketDeal, error) {
    return *new([]*MarketDeal), ErrNotSupported
}

func (s *StorageMinerStruct) DealsPieceCidBlocklist(p0 context.Context) ([]cid.Cid, error) {
    if s.Internal.DealsPieceCidBlocklist == nil {
        return *new([]cid.Cid), ErrNotSupported
    }
    return s.Internal.DealsPieceCidBlocklist(p0)
}

func (s *StorageMinerStub) DealsPieceCidBlocklist(p0 context.Context) ([]cid.Cid, error) {
    return *new([]cid.Cid), ErrNotSupported
}

func (s *StorageMinerStruct) DealsSetConsiderOfflineRetrievalDeals(p0 context.Context, p1 bool) error {
    if s.Internal.DealsSetConsiderOfflineRetrievalDeals == nil {
        return ErrNotSupported
    }
    return s.Internal.DealsSetConsiderOfflineRetrievalDeals(p0, p1)
}

func (s *StorageMinerStub) DealsSetConsiderOfflineRetrievalDeals(p0 context.Context, p1 bool) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) DealsSetConsiderOfflineStorageDeals(p0 context.Context, p1 bool) error {
    if s.Internal.DealsSetConsiderOfflineStorageDeals == nil {
        return ErrNotSupported
    }
    return s.Internal.DealsSetConsiderOfflineStorageDeals(p0, p1)
}

func (s *StorageMinerStub) DealsSetConsiderOfflineStorageDeals(p0 context.Context, p1 bool) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) DealsSetConsiderOnlineRetrievalDeals(p0 context.Context, p1 bool) error {
    if s.Internal.DealsSetConsiderOnlineRetrievalDeals == nil {
        return ErrNotSupported
    }
    return s.Internal.DealsSetConsiderOnlineRetrievalDeals(p0, p1)
}

func (s *StorageMinerStub) DealsSetConsiderOnlineRetrievalDeals(p0 context.Context, p1 bool) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) DealsSetConsiderOnlineStorageDeals(p0 context.Context, p1 bool) error {
    if s.Internal.DealsSetConsiderOnlineStorageDeals == nil {
        return ErrNotSupported
    }
    return s.Internal.DealsSetConsiderOnlineStorageDeals(p0, p1)
}

func (s *StorageMinerStub) DealsSetConsiderOnlineStorageDeals(p0 context.Context, p1 bool) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) DealsSetConsiderUnverifiedStorageDeals(p0 context.Context, p1 bool) error {
    if s.Internal.DealsSetConsiderUnverifiedStorageDeals == nil {
        return ErrNotSupported
    }
    return s.Internal.DealsSetConsiderUnverifiedStorageDeals(p0, p1)
}

func (s *StorageMinerStub) DealsSetConsiderUnverifiedStorageDeals(p0 context.Context, p1 bool) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) DealsSetConsiderVerifiedStorageDeals(p0 context.Context, p1 bool) error {
    if s.Internal.DealsSetConsiderVerifiedStorageDeals == nil {
        return ErrNotSupported
    }
    return s.Internal.DealsSetConsiderVerifiedStorageDeals(p0, p1)
}

func (s *StorageMinerStub) DealsSetConsiderVerifiedStorageDeals(p0 context.Context, p1 bool) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) DealsSetPieceCidBlocklist(p0 context.Context, p1 []cid.Cid) error {
    if s.Internal.DealsSetPieceCidBlocklist == nil {
        return ErrNotSupported
    }
    return s.Internal.DealsSetPieceCidBlocklist(p0, p1)
}

func (s *StorageMinerStub) DealsSetPieceCidBlocklist(p0 context.Context, p1 []cid.Cid) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) IndexerAnnounceAllDeals(p0 context.Context) error {
    if s.Internal.IndexerAnnounceAllDeals == nil {
        return ErrNotSupported
    }
    return s.Internal.IndexerAnnounceAllDeals(p0)
}

func (s *StorageMinerStub) IndexerAnnounceAllDeals(p0 context.Context) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) IndexerAnnounceDeal(p0 context.Context, p1 cid.Cid) error {
    if s.Internal.IndexerAnnounceDeal == nil {
        return ErrNotSupported
    }
    return s.Internal.IndexerAnnounceDeal(p0, p1)
}

func (s *StorageMinerStub) IndexerAnnounceDeal(p0 context.Context, p1 cid.Cid) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
    if s.Internal.MarketCancelDataTransfer == nil {
        return ErrNotSupported
    }
    return s.Internal.MarketCancelDataTransfer(p0, p1, p2, p3)
}

func (s *StorageMinerStub) MarketCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) MarketDataTransferDiagnostics(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) {
    if s.Internal.MarketDataTransferDiagnostics == nil {
        return nil, ErrNotSupported
    }
    return s.Internal.MarketDataTransferDiagnostics(p0, p1)
}

func (s *StorageMinerStub) MarketDataTransferDiagnostics(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) {
    return nil, ErrNotSupported
}

func (s *StorageMinerStruct) MarketDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) {
    if s.Internal.MarketDataTransferUpdates == nil {
        return nil, ErrNotSupported
    }
    return s.Internal.MarketDataTransferUpdates(p0)
}

func (s *StorageMinerStub) MarketDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) {
    return nil, ErrNotSupported
}

func (s *StorageMinerStruct) MarketGetAsk(p0 context.Context) (*storagemarket.SignedStorageAsk, error) {
    if s.Internal.MarketGetAsk == nil {
        return nil, ErrNotSupported
    }
    return s.Internal.MarketGetAsk(p0)
}

func (s *StorageMinerStub) MarketGetAsk(p0 context.Context) (*storagemarket.SignedStorageAsk, error) {
    return nil, ErrNotSupported
}

func (s *StorageMinerStruct) MarketGetDealUpdates(p0 context.Context) (<-chan storagemarket.MinerDeal, error) {
    if s.Internal.MarketGetDealUpdates == nil {
        return nil, ErrNotSupported
    }
    return s.Internal.MarketGetDealUpdates(p0)
}

func (s *StorageMinerStub) MarketGetDealUpdates(p0 context.Context) (<-chan storagemarket.MinerDeal, error) {
    return nil, ErrNotSupported
}

func (s *StorageMinerStruct) MarketGetRetrievalAsk(p0 context.Context) (*retrievalmarket.Ask, error) {
    if s.Internal.MarketGetRetrievalAsk == nil {
        return nil, ErrNotSupported
    }
    return s.Internal.MarketGetRetrievalAsk(p0)
}

func (s *StorageMinerStub) MarketGetRetrievalAsk(p0 context.Context) (*retrievalmarket.Ask, error) {
    return nil, ErrNotSupported
}

func (s *StorageMinerStruct) MarketImportDealData(p0 context.Context, p1 cid.Cid, p2 string) error {
    if s.Internal.MarketImportDealData == nil {
        return ErrNotSupported
    }
    return s.Internal.MarketImportDealData(p0, p1, p2)
}

func (s *StorageMinerStub) MarketImportDealData(p0 context.Context, p1 cid.Cid, p2 string) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) MarketListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) {
    if s.Internal.MarketListDataTransfers == nil {
        return *new([]DataTransferChannel), ErrNotSupported
    }
    return s.Internal.MarketListDataTransfers(p0)
}

func (s *StorageMinerStub) MarketListDataTransfers(p0 context.Context) ([]DataTransferChannel, error) {
    return *new([]DataTransferChannel), ErrNotSupported
}

func (s *StorageMinerStruct) MarketListDeals(p0 context.Context) ([]*MarketDeal, error) {
    if s.Internal.MarketListDeals == nil {
        return *new([]*MarketDeal), ErrNotSupported
@@ -5727,94 +5270,6 @@ func (s *StorageMinerStub) MarketListDeals(p0 context.Context) ([]*MarketDeal, e
    return *new([]*MarketDeal), ErrNotSupported
}

func (s *StorageMinerStruct) MarketListIncompleteDeals(p0 context.Context) ([]storagemarket.MinerDeal, error) {
    if s.Internal.MarketListIncompleteDeals == nil {
        return *new([]storagemarket.MinerDeal), ErrNotSupported
    }
    return s.Internal.MarketListIncompleteDeals(p0)
}

func (s *StorageMinerStub) MarketListIncompleteDeals(p0 context.Context) ([]storagemarket.MinerDeal, error) {
    return *new([]storagemarket.MinerDeal), ErrNotSupported
}

func (s *StorageMinerStruct) MarketListRetrievalDeals(p0 context.Context) ([]struct{}, error) {
    if s.Internal.MarketListRetrievalDeals == nil {
        return *new([]struct{}), ErrNotSupported
    }
    return s.Internal.MarketListRetrievalDeals(p0)
}

func (s *StorageMinerStub) MarketListRetrievalDeals(p0 context.Context) ([]struct{}, error) {
    return *new([]struct{}), ErrNotSupported
}

func (s *StorageMinerStruct) MarketPendingDeals(p0 context.Context) (PendingDealInfo, error) {
    if s.Internal.MarketPendingDeals == nil {
        return *new(PendingDealInfo), ErrNotSupported
    }
    return s.Internal.MarketPendingDeals(p0)
}

func (s *StorageMinerStub) MarketPendingDeals(p0 context.Context) (PendingDealInfo, error) {
    return *new(PendingDealInfo), ErrNotSupported
}

func (s *StorageMinerStruct) MarketPublishPendingDeals(p0 context.Context) error {
    if s.Internal.MarketPublishPendingDeals == nil {
        return ErrNotSupported
    }
    return s.Internal.MarketPublishPendingDeals(p0)
}

func (s *StorageMinerStub) MarketPublishPendingDeals(p0 context.Context) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) MarketRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
    if s.Internal.MarketRestartDataTransfer == nil {
        return ErrNotSupported
    }
    return s.Internal.MarketRestartDataTransfer(p0, p1, p2, p3)
}

func (s *StorageMinerStub) MarketRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) MarketRetryPublishDeal(p0 context.Context, p1 cid.Cid) error {
    if s.Internal.MarketRetryPublishDeal == nil {
        return ErrNotSupported
    }
    return s.Internal.MarketRetryPublishDeal(p0, p1)
}

func (s *StorageMinerStub) MarketRetryPublishDeal(p0 context.Context, p1 cid.Cid) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) MarketSetAsk(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error {
    if s.Internal.MarketSetAsk == nil {
        return ErrNotSupported
    }
    return s.Internal.MarketSetAsk(p0, p1, p2, p3, p4, p5)
}

func (s *StorageMinerStub) MarketSetAsk(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) MarketSetRetrievalAsk(p0 context.Context, p1 *retrievalmarket.Ask) error {
    if s.Internal.MarketSetRetrievalAsk == nil {
        return ErrNotSupported
    }
    return s.Internal.MarketSetRetrievalAsk(p0, p1)
}

func (s *StorageMinerStub) MarketSetRetrievalAsk(p0 context.Context, p1 *retrievalmarket.Ask) error {
    return ErrNotSupported
}

func (s *StorageMinerStruct) MiningBase(p0 context.Context) (*types.TipSet, error) {
    if s.Internal.MiningBase == nil {
        return nil, ErrNotSupported
@@ -5826,50 +5281,6 @@ func (s *StorageMinerStub) MiningBase(p0 context.Context) (*types.TipSet, error)
    return nil, ErrNotSupported
}

func (s *StorageMinerStruct) PiecesGetCIDInfo(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) {
    if s.Internal.PiecesGetCIDInfo == nil {
        return nil, ErrNotSupported
    }
    return s.Internal.PiecesGetCIDInfo(p0, p1)
}

func (s *StorageMinerStub) PiecesGetCIDInfo(p0 context.Context, p1 cid.Cid) (*piecestore.CIDInfo, error) {
    return nil, ErrNotSupported
}

func (s *StorageMinerStruct) PiecesGetPieceInfo(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) {
    if s.Internal.PiecesGetPieceInfo == nil {
        return nil, ErrNotSupported
    }
    return s.Internal.PiecesGetPieceInfo(p0, p1)
}

func (s *StorageMinerStub) PiecesGetPieceInfo(p0 context.Context, p1 cid.Cid) (*piecestore.PieceInfo, error) {
    return nil, ErrNotSupported
}

func (s *StorageMinerStruct) PiecesListCidInfos(p0 context.Context) ([]cid.Cid, error) {
    if s.Internal.PiecesListCidInfos == nil {
        return *new([]cid.Cid), ErrNotSupported
    }
    return s.Internal.PiecesListCidInfos(p0)
}

func (s *StorageMinerStub) PiecesListCidInfos(p0 context.Context) ([]cid.Cid, error) {
    return *new([]cid.Cid), ErrNotSupported
}

func (s *StorageMinerStruct) PiecesListPieces(p0 context.Context) ([]cid.Cid, error) {
    if s.Internal.PiecesListPieces == nil {
        return *new([]cid.Cid), ErrNotSupported
    }
    return s.Internal.PiecesListPieces(p0)
}

func (s *StorageMinerStub) PiecesListPieces(p0 context.Context) ([]cid.Cid, error) {
    return *new([]cid.Cid), ErrNotSupported
}

func (s *StorageMinerStruct) PledgeSector(p0 context.Context) (abi.SectorID, error) {
    if s.Internal.PledgeSector == nil {
        return *new(abi.SectorID), ErrNotSupported
api/types.go | 125 changed lines

@@ -2,22 +2,16 @@ package api

import (
    "encoding/json"
    "fmt"
    "time"

    "github.com/google/uuid"
    "github.com/ipfs/go-cid"
    "github.com/ipfs/go-graphsync"
    "github.com/ipld/go-ipld-prime"
    "github.com/ipld/go-ipld-prime/codec/dagjson"
    pubsub "github.com/libp2p/go-libp2p-pubsub"
    "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/peer"
    ma "github.com/multiformats/go-multiaddr"

    "github.com/filecoin-project/go-address"
    datatransfer "github.com/filecoin-project/go-data-transfer/v2"
    "github.com/filecoin-project/go-fil-markets/retrievalmarket"
    "github.com/filecoin-project/go-state-types/abi"

    "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@@ -69,71 +63,6 @@ type MessageSendSpec struct {
    MaximizeFeeCap bool
}

// GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync
type GraphSyncDataTransfer struct {
    // GraphSync request id for this transfer
    RequestID *graphsync.RequestID
    // Graphsync state for this transfer
    RequestState string
    // If a channel ID is present, indicates whether this is the current graphsync request for this channel
    // (could have changed in a restart)
    IsCurrentChannelRequest bool
    // Data transfer channel ID for this transfer
    ChannelID *datatransfer.ChannelID
    // Data transfer state for this transfer
    ChannelState *DataTransferChannel
    // Diagnostic information about this request -- and unexpected inconsistencies in
    // request state
    Diagnostics []string
}

// TransferDiagnostics gives current information about transfers going over graphsync that may be helpful for debugging
type TransferDiagnostics struct {
    ReceivingTransfers []*GraphSyncDataTransfer
    SendingTransfers   []*GraphSyncDataTransfer
}

type DataTransferChannel struct {
    TransferID  datatransfer.TransferID
    Status      datatransfer.Status
    BaseCID     cid.Cid
    IsInitiator bool
    IsSender    bool
    Voucher     string
    Message     string
    OtherPeer   peer.ID
    Transferred uint64
    Stages      *datatransfer.ChannelStages
}

// NewDataTransferChannel constructs an API DataTransferChannel type from full channel state snapshot and a host id
func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelState) DataTransferChannel {
    channel := DataTransferChannel{
        TransferID: channelState.TransferID(),
        Status:     channelState.Status(),
        BaseCID:    channelState.BaseCID(),
        IsSender:   channelState.Sender() == hostID,
        Message:    channelState.Message(),
    }
    voucher := channelState.Voucher()
    voucherJSON, err := ipld.Encode(voucher.Voucher, dagjson.Encode)
    if err != nil {
        channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error()
    } else {
        channel.Voucher = string(voucherJSON)
    }
    if channel.IsSender {
        channel.IsInitiator = !channelState.IsPull()
        channel.Transferred = channelState.Sent()
        channel.OtherPeer = channelState.Recipient()
    } else {
        channel.IsInitiator = channelState.IsPull()
        channel.Transferred = channelState.Received()
        channel.OtherPeer = channelState.Sender()
    }
    return channel
}
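These types fed the miner-side transfer monitoring endpoints (MarketDataTransferUpdates, MarketDataTransferDiagnostics) that this commit deletes. A sketch of how they were consumed together; client setup is elided and error handling kept minimal:

    updates, err := minerAPI.MarketDataTransferUpdates(ctx)
    if err != nil {
        return err
    }
    for ch := range updates {
        fmt.Printf("transfer %d with %s: status=%v transferred=%d\n",
            ch.TransferID, ch.OtherPeer, ch.Status, ch.Transferred)
        if ch.Message != "" { // per the struct, errors surface in Message
            diag, err := minerAPI.MarketDataTransferDiagnostics(ctx, ch.OtherPeer)
            if err == nil {
                fmt.Printf("graphsync diagnostics: %+v\n", diag)
            }
        }
    }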
type NetStat struct {
    System    *network.ScopeStat `json:",omitempty"`
    Transient *network.ScopeStat `json:",omitempty"`
@@ -229,31 +158,6 @@ type MessagePrototype struct {
    ValidNonce bool
}

type RetrievalInfo struct {
    PayloadCID   cid.Cid
    ID           retrievalmarket.DealID
    PieceCID     *cid.Cid
    PricePerByte abi.TokenAmount
    UnsealPrice  abi.TokenAmount

    Status        retrievalmarket.DealStatus
    Message       string // more information about deal state, particularly errors
    Provider      peer.ID
    BytesReceived uint64
    BytesPaidFor  uint64
    TotalPaid     abi.TokenAmount

    TransferChannelID *datatransfer.ChannelID
    DataTransfer      *DataTransferChannel

    // optional event if part of ClientGetRetrievalUpdates
    Event *retrievalmarket.ClientEvent
}

type RestrievalRes struct {
    DealID retrievalmarket.DealID
}

// Selector specifies ipld selector string
// - if the string starts with '{', it's interpreted as json selector string
//   see https://ipld.io/specs/selectors/ and https://ipld.io/specs/selectors/fixtures/selector-fixtures-1/
@@ -261,35 +165,6 @@ type RestrievalRes struct {
// see https://github.com/ipld/go-ipld-selector-text-lite
type Selector string

type DagSpec struct {
    // DataSelector matches data to be retrieved
    // - when using textselector, the path specifies subtree
    // - the matched graph must have a single root
    DataSelector *Selector

    // ExportMerkleProof is applicable only when exporting to a CAR file via a path textselector
    // When true, in addition to the selection target, the resulting CAR will contain every block along the
    // path back to, and including the original root
    // When false the resulting CAR contains only the blocks of the target subdag
    ExportMerkleProof bool
}

type ExportRef struct {
    Root cid.Cid

    // DAGs array specifies a list of DAGs to export
    // - If exporting into unixfs files, only one DAG is supported, DataSelector is only used to find the targeted root node
    // - If exporting into a car file
    //   - When exactly one text-path DataSelector is specified exports the subgraph and its full merkle-path from the original root
    //   - Otherwise (multiple paths and/or JSON selector specs) determines each individual subroot and exports the subtrees as a multi-root car
    //   - When not specified defaults to a single DAG:
    //     - Data - the entire DAG: `{"R":{"l":{"none":{}},":>":{"a":{">":{"@":{}}}}}}`
    DAGs []DagSpec

    FromLocalCAR string // if specified, get data from a local CARv2 file.
    DealID       retrievalmarket.DealID
}
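ExportRef and DagSpec parameterize client-side DAG exports. A sketch of a CAR export request that keeps the merkle proof along a text-selector path; the root CID and paths are hypothetical, and the client export call that would consume this is part of the removed client API:

    sel := api.Selector("Links/0/Hash") // textselector path into the DAG
    ref := api.ExportRef{
        Root: root, // cid.Cid of the DAG root being exported
        DAGs: []api.DagSpec{{
            DataSelector:      &sel,
            ExportMerkleProof: true, // also include every block back to the root
        }},
        FromLocalCAR: "/tmp/retrieved.car", // read blocks from a local CARv2 file
    }
    _ = ref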
type MinerInfo struct {
    Owner  address.Address // Must be an ID-address.
    Worker address.Address // Must be an ID-address.
api/v0api/full.go

@@ -5,11 +5,9 @@ import (
    blocks "github.com/ipfs/go-block-format"
    "github.com/ipfs/go-cid"
    textselector "github.com/ipld/go-ipld-selector-text-lite"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-bitfield"
    "github.com/filecoin-project/go-fil-markets/retrievalmarket"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/builtin/v8/paych"
    verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
@@ -670,37 +668,3 @@ type FullNode interface {
    // the path specified when calling CreateBackup is within the base path
    CreateBackup(ctx context.Context, fpath string) error //perm:admin
}

func OfferOrder(o api.QueryOffer, client address.Address) RetrievalOrder {
    return RetrievalOrder{
        Root:                    o.Root,
        Piece:                   o.Piece,
        Size:                    o.Size,
        Total:                   o.MinPrice,
        UnsealPrice:             o.UnsealPrice,
        PaymentInterval:         o.PaymentInterval,
        PaymentIntervalIncrease: o.PaymentIntervalIncrease,
        Client:                  client,

        Miner:     o.Miner,
        MinerPeer: &o.MinerPeer,
    }
}

type RetrievalOrder struct {
    // TODO: make this less unixfs specific
    Root                  cid.Cid
    Piece                 *cid.Cid
    DatamodelPathSelector *textselector.Expression
    Size                  uint64

    FromLocalCAR string // if specified, get data from a local CARv2 file.
    // TODO: support offset
    Total                   types.BigInt
    UnsealPrice             types.BigInt
    PaymentInterval         uint64
    PaymentIntervalIncrease uint64
    Client                  address.Address
    Miner                   address.Address
    MinerPeer               *retrievalmarket.RetrievalPeer
}
File diff suppressed because it is too large
@@ -242,7 +242,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3871"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3777"
@@ -473,7 +473,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3882"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3788"
@@ -572,7 +572,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3893"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3799"
@@ -604,7 +604,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3904"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3810"
@@ -710,7 +710,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3915"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3821"
@@ -803,7 +803,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3926"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3832"
@@ -887,7 +887,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3937"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3843"
@@ -987,7 +987,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3948"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3854"
@@ -1043,7 +1043,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3959"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3865"
@@ -1116,7 +1116,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3970"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3876"
@@ -1189,7 +1189,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3981"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3887"
@@ -1236,7 +1236,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3992"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3898"
@@ -1268,7 +1268,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4003"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3909"
@@ -1305,7 +1305,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4025"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3931"
@@ -1352,7 +1352,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4036"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3942"
@@ -1392,7 +1392,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4047"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3953"
@@ -1439,7 +1439,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4058"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3964"
@@ -1494,7 +1494,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4069"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3975"
@@ -1523,7 +1523,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4080"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3986"
@@ -1660,7 +1660,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4091"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3997"
@@ -1689,7 +1689,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4102"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4008"
@@ -1743,7 +1743,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4113"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4019"
@@ -1834,7 +1834,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4124"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4030"
@@ -1862,7 +1862,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4135"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4041"
@@ -1952,7 +1952,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4146"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4052"
@@ -2208,7 +2208,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4157"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4063"
@@ -2453,7 +2453,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4168"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4074"
@@ -2509,7 +2509,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4179"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4085"
@@ -2556,7 +2556,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4190"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4096"
@@ -2654,7 +2654,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4201"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4107"
@@ -2720,7 +2720,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4212"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4118"
@@ -2786,7 +2786,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4223"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4129"
@@ -2895,7 +2895,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4234"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4140"
@@ -2953,7 +2953,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4245"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4151"
@@ -3075,7 +3075,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4256"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4162"
@@ -3267,7 +3267,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4267"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4173"
@@ -3476,7 +3476,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4278"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4184"
@@ -3567,7 +3567,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4289"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4195"
@@ -3625,7 +3625,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4300"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4206"
@@ -3883,7 +3883,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4311"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4217"
@@ -4158,7 +4158,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4322"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4228"
@@ -4186,7 +4186,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4333"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4239"
@@ -4224,7 +4224,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4344"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4250"
@@ -4332,7 +4332,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4355"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4261"
@@ -4370,7 +4370,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4366"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4272"
@@ -4399,7 +4399,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4377"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4283"
@@ -4462,7 +4462,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4388"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4294"
@@ -4525,7 +4525,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4399"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4305"
@@ -4570,7 +4570,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4410"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4316"
@@ -4692,7 +4692,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4421"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4327"
@@ -4847,7 +4847,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4432"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4338"
@@ -4901,7 +4901,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4443"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4349"
@@ -4955,7 +4955,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4454"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4360"
@@ -5010,7 +5010,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4465"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4371"
@@ -5112,7 +5112,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4476"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4382"
@@ -5335,7 +5335,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4487"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4393"
@@ -5518,7 +5518,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4498"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4404"
@@ -5712,7 +5712,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4509"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4415"
@@ -5758,7 +5758,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4520"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4426"
@@ -5908,7 +5908,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4531"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4437"
@@ -6045,7 +6045,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4542"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4448"
@@ -6113,7 +6113,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4553"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4459"
@@ -6230,7 +6230,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4564"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4470"
@@ -6321,7 +6321,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4575"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4481"
@@ -6407,7 +6407,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4586"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4492"
@@ -6434,7 +6434,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4597"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4503"
@@ -6461,7 +6461,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4608"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4514"
@@ -6529,7 +6529,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4619"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4525"
@@ -7035,7 +7035,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4630"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4536"
@@ -7132,7 +7132,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4641"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4547"
@@ -7232,7 +7232,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4652"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4558"
@@ -7332,7 +7332,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4663"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4569"
@@ -7457,7 +7457,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4674"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4580"
@@ -7566,7 +7566,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4685"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4591"
@@ -7669,7 +7669,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4696"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4602"
@@ -7799,7 +7799,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4707"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4613"
@@ -7906,7 +7906,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4718"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4624"
@@ -7967,7 +7967,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4729"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4635"
@@ -8035,7 +8035,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4740"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4646"
@@ -8116,7 +8116,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4751"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4657"
@@ -8280,7 +8280,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4762"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4668"
@@ -8373,7 +8373,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4773"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4679"
@@ -8574,7 +8574,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4784"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4690"
@@ -8685,7 +8685,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4795"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4701"
@@ -8816,7 +8816,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4806"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4712"
@@ -8902,7 +8902,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4817"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4723"
@@ -8929,7 +8929,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4828"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4734"
@@ -8982,7 +8982,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4839"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4745"
@@ -9070,7 +9070,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4850"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4756"
@@ -9521,7 +9521,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4861"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4767"
@@ -9688,7 +9688,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4872"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4778"
@@ -9861,7 +9861,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4883"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4789"
@@ -9929,7 +9929,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4894"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4800"
@@ -9997,7 +9997,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4905"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4811"
@@ -10158,7 +10158,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4916"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4822"
@@ -10203,7 +10203,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4938"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4844"
@@ -10248,7 +10248,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4949"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4855"
@@ -10275,7 +10275,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4960"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4866"
]
File diff suppressed because it is too large
@@ -161,7 +161,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6786"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6197"
@@ -252,7 +252,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6797"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6208"
@@ -420,7 +420,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6808"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6219"
@@ -447,7 +447,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6819"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6230"
@@ -597,7 +597,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6830"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6241"
@@ -700,7 +700,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6841"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6252"
@@ -803,7 +803,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6852"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6263"
@@ -925,7 +925,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6863"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6274"
@@ -1135,7 +1135,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6874"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6285"
@@ -1306,7 +1306,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6885"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6296"
@@ -3350,7 +3350,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6896"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6307"
@@ -3470,7 +3470,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6907"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6318"
@@ -3531,7 +3531,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6918"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6329"
@@ -3569,7 +3569,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6929"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6340"
@@ -3729,7 +3729,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6940"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6351"
@@ -3913,7 +3913,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6951"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6362"
@@ -4054,7 +4054,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6962"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6373"
@@ -4107,7 +4107,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6973"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6384"
@@ -4250,7 +4250,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6984"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6395"
@@ -4474,7 +4474,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6995"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6406"
@@ -4601,7 +4601,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7006"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6417"
@@ -4768,7 +4768,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7017"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6428"
@@ -4895,7 +4895,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7028"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6439"
@@ -4933,7 +4933,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7039"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6450"
@@ -4972,7 +4972,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7050"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6461"
@@ -4995,7 +4995,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7061"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6472"
@@ -5034,7 +5034,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7072"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6483"
@@ -5057,7 +5057,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7083"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6494"
@@ -5096,7 +5096,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7094"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6505"
@@ -5130,7 +5130,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7105"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6516"
@@ -5184,7 +5184,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7116"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6527"
@@ -5223,7 +5223,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7127"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6538"
@@ -5262,7 +5262,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7138"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6549"
@@ -5297,7 +5297,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7149"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6560"
@@ -5477,7 +5477,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7160"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6571"
@@ -5506,7 +5506,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7171"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6582"
@@ -5529,7 +5529,7 @@
-          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L7182"
+          "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6593"
]
@@ -16,6 +16,7 @@ import (
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/connmgr"
	"github.com/libp2p/go-libp2p/core/peer"
+	mh "github.com/multiformats/go-multihash"
	"go.opencensus.io/stats"
	"go.opencensus.io/tag"
	"golang.org/x/xerrors"
@@ -29,13 +30,12 @@ import (
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/sub/ratelimit"
	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/filecoin-project/lotus/lib/unixfs"
	"github.com/filecoin-project/lotus/metrics"
	"github.com/filecoin-project/lotus/node/impl/full"
)

var log = logging.Logger("sub")
-var DefaultHashFunction = unixfs.DefaultHashFunction
+var DefaultHashFunction = uint64(mh.BLAKE2B_MIN + 31)
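The replacement inlines the old default rather than importing it: in go-multihash, `BLAKE2B_MIN + 31` is the multicodec for blake2b-256, so the default hash is unchanged. A quick standalone sanity check:

```go
package main

import (
	"fmt"

	mh "github.com/multiformats/go-multihash"
)

func main() {
	code := uint64(mh.BLAKE2B_MIN + 31)
	fmt.Println(mh.Codes[code]) // prints "blake2b-256"
}
```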
var msgCidPrefix = cid.Prefix{
	Version: 1,
@@ -1,77 +0,0 @@
-package cliutil
-
-import (
-	"encoding/json"
-	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"path"
-
-	"github.com/multiformats/go-multiaddr"
-	manet "github.com/multiformats/go-multiaddr/net"
-	"golang.org/x/xerrors"
-
-	"github.com/filecoin-project/lotus/api"
-)
-func ApiAddrToUrl(apiAddr string) (*url.URL, error) {
-	ma, err := multiaddr.NewMultiaddr(apiAddr)
-	if err == nil {
-		_, addr, err := manet.DialArgs(ma)
-		if err != nil {
-			return nil, err
-		}
-		// todo: make cliutil helpers for this
-		apiAddr = "http://" + addr
-	}
-	aa, err := url.Parse(apiAddr)
-	if err != nil {
-		return nil, xerrors.Errorf("parsing api address: %w", err)
-	}
-	switch aa.Scheme {
-	case "ws":
-		aa.Scheme = "http"
-	case "wss":
-		aa.Scheme = "https"
-	}
-
-	return aa, nil
-}
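For reference, a sketch of how this removed helper behaved: it accepted either a multiaddr or a plain URL and normalized ws/wss schemes to http/https.

```go
// Illustrative only: ApiAddrToUrl is deleted by this commit.
u, err := ApiAddrToUrl("/ip4/127.0.0.1/tcp/1234/http")
if err != nil {
	// handle error
}
fmt.Println(u) // http://127.0.0.1:1234
```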
-func ClientExportStream(apiAddr string, apiAuth http.Header, eref api.ExportRef, car bool) (io.ReadCloser, error) {
-	rj, err := json.Marshal(eref)
-	if err != nil {
-		return nil, xerrors.Errorf("marshaling export ref: %w", err)
-	}
-
-	aa, err := ApiAddrToUrl(apiAddr)
-	if err != nil {
-		return nil, err
-	}
-
-	aa.Path = path.Join(aa.Path, "rest/v0/export")
-	req, err := http.NewRequest("GET", fmt.Sprintf("%s?car=%t&export=%s", aa, car, url.QueryEscape(string(rj))), nil)
-	if err != nil {
-		return nil, err
-	}
-
-	req.Header = apiAuth
-
-	resp, err := http.DefaultClient.Do(req)
-	if err != nil {
-		return nil, err
-	}
-
-	if resp.StatusCode != http.StatusOK {
-		em, err := io.ReadAll(resp.Body)
-		if err != nil {
-			return nil, xerrors.Errorf("reading error body: %w", err)
-		}
-
-		resp.Body.Close() // nolint
-		return nil, xerrors.Errorf("getting root car: http %d: %s", resp.StatusCode, string(em))
-	}
-
-	return resp.Body, nil
-}
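A sketch of a typical caller of this removed function, streaming a CAR export to disk. The token, `eref`, and output path are placeholders:

```go
// Illustrative only: ClientExportStream is deleted by this commit.
hdr := http.Header{"Authorization": []string{"Bearer " + token}}
rc, err := ClientExportStream("/ip4/127.0.0.1/tcp/1234/http", hdr, eref, true)
if err != nil {
	return err
}
defer rc.Close() // nolint
out, err := os.Create("export.car")
if err != nil {
	return err
}
defer out.Close() // nolint
_, err = io.Copy(out, rc)
return err
```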
@@ -129,7 +129,6 @@ var initCmd = &cli.Command{
	},
	Subcommands: []*cli.Command{
		restoreCmd,
-		serviceCmd,
	},
	Action: func(cctx *cli.Context) error {
		log.Info("Initializing lotus miner")
@@ -1,159 +0,0 @@
-package main
-
-import (
-	"context"
-	"strings"
-
-	"github.com/libp2p/go-libp2p/core/peer"
-	"github.com/urfave/cli/v2"
-	"golang.org/x/xerrors"
-
-	"github.com/filecoin-project/go-address"
-	"github.com/filecoin-project/go-state-types/big"
-
-	"github.com/filecoin-project/lotus/api"
-	lapi "github.com/filecoin-project/lotus/api"
-	"github.com/filecoin-project/lotus/api/client"
-	lcli "github.com/filecoin-project/lotus/cli"
-	cliutil "github.com/filecoin-project/lotus/cli/util"
-	"github.com/filecoin-project/lotus/node/config"
-	"github.com/filecoin-project/lotus/storage/sealer/storiface"
-)
-
-const (
-	MarketsService = "markets"
-)
-
-var serviceCmd = &cli.Command{
-	Name:  "service",
-	Usage: "Initialize a lotus miner sub-service",
-	Flags: []cli.Flag{
-		&cli.StringFlag{
-			Name:     "config",
-			Usage:    "config file (config.toml)",
-			Required: true,
-		},
-		&cli.BoolFlag{
-			Name:  "nosync",
-			Usage: "don't check full-node sync status",
-		},
-		&cli.StringSliceFlag{
-			Name:  "type",
-			Usage: "type of service to be enabled",
-		},
-		&cli.StringFlag{
-			Name:  "api-sealer",
-			Usage: "sealer API info (lotus-miner auth api-info --perm=admin)",
-		},
-		&cli.StringFlag{
-			Name:  "api-sector-index",
-			Usage: "sector Index API info (lotus-miner auth api-info --perm=admin)",
-		},
-	},
-	ArgsUsage: "[backupFile]",
-	Action: func(cctx *cli.Context) error {
-		ctx := lcli.ReqContext(cctx)
-		log.Info("Initializing lotus miner service")
-
-		es := EnabledServices(cctx.StringSlice("type"))
-
-		if len(es) == 0 {
-			return xerrors.Errorf("at least one module must be enabled")
-		}
-
-		// we should remove this as soon as we have more service types and not just `markets`
-		if !es.Contains(MarketsService) {
-			return xerrors.Errorf("markets module must be enabled")
-		}
-
-		if !cctx.IsSet("api-sealer") {
-			return xerrors.Errorf("--api-sealer is required without the sealer module enabled")
-		}
-		if !cctx.IsSet("api-sector-index") {
-			return xerrors.Errorf("--api-sector-index is required without the sector storage module enabled")
-		}
-
-		repoPath := cctx.String(FlagMarketsRepo)
-		if repoPath == "" {
-			return xerrors.Errorf("please provide Lotus markets repo path via flag %s", FlagMarketsRepo)
-		}
-
-		if err := restore(ctx, cctx, repoPath, &storiface.StorageConfig{}, func(cfg *config.StorageMiner) error {
-			cfg.Subsystems.EnableMarkets = es.Contains(MarketsService)
-			cfg.Subsystems.EnableMining = false
-			cfg.Subsystems.EnableSealing = false
-			cfg.Subsystems.EnableSectorStorage = false
-
-			if !cfg.Subsystems.EnableSealing {
-				ai, err := checkApiInfo(ctx, cctx.String("api-sealer"))
-				if err != nil {
-					return xerrors.Errorf("checking sealer API: %w", err)
-				}
-				cfg.Subsystems.SealerApiInfo = ai
-			}
-
-			if !cfg.Subsystems.EnableSectorStorage {
-				ai, err := checkApiInfo(ctx, cctx.String("api-sector-index"))
-				if err != nil {
-					return xerrors.Errorf("checking sector index API: %w", err)
-				}
-				cfg.Subsystems.SectorIndexApiInfo = ai
-			}
-
-			return nil
-		}, func(api lapi.FullNode, maddr address.Address, peerid peer.ID, mi api.MinerInfo) error {
-			if es.Contains(MarketsService) {
-				log.Info("Configuring miner actor")
-
-				if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero(), cctx.Uint64("confidence")); err != nil {
-					return err
-				}
-			}
-
-			return nil
-		}); err != nil {
-			return err
-		}
-
-		return nil
-	},
-}
-
-type EnabledServices []string
-
-func (es EnabledServices) Contains(name string) bool {
-	for _, s := range es {
-		if s == name {
-			return true
-		}
-	}
-	return false
-}
-
-func checkApiInfo(ctx context.Context, ai string) (string, error) {
-	ai = strings.TrimPrefix(strings.TrimSpace(ai), "MINER_API_INFO=")
-	info := cliutil.ParseApiInfo(ai)
-	addr, err := info.DialArgs("v0")
-	if err != nil {
-		return "", xerrors.Errorf("could not get DialArgs: %w", err)
-	}
-
-	log.Infof("Checking api version of %s", addr)
-
-	api, closer, err := client.NewStorageMinerRPCV0(ctx, addr, info.AuthHeader())
-	if err != nil {
-		return "", err
-	}
-	defer closer()
-
-	v, err := api.Version(ctx)
-	if err != nil {
-		return "", xerrors.Errorf("checking version: %w", err)
-	}
-
-	if !v.APIVersion.EqMajorMinor(lapi.MinerAPIVersion0) {
-		return "", xerrors.Errorf("remote service API version didn't match (expected %s, remote %s)", lapi.MinerAPIVersion0, v.APIVersion)
-	}
-
-	return ai, nil
-}
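For reference, a sketch of how this removed helper was driven from the service command above. `ctx`, `cctx`, and `cfg` stand in for the values the command had in scope:

```go
// Illustrative only: checkApiInfo is deleted along with the service command.
ai, err := checkApiInfo(ctx, cctx.String("api-sealer"))
if err != nil {
	return xerrors.Errorf("checking sealer API: %w", err)
}
cfg.Subsystems.SealerApiInfo = ai
```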
@@ -20,7 +20,6 @@ import (
	"github.com/filecoin-project/lotus/lib/ulimit"
	"github.com/filecoin-project/lotus/metrics"
	"github.com/filecoin-project/lotus/node"
-	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	"github.com/filecoin-project/lotus/node/repo"
)
@@ -121,16 +120,6 @@ var runCmd = &cli.Command{
		if err != nil {
			return err
		}
-		c, err := lr.Config()
-		if err != nil {
-			return err
-		}
-		cfg, ok := c.(*config.StorageMiner)
-		if !ok {
-			return xerrors.Errorf("invalid config for repo, got: %T", c)
-		}
-
-		bootstrapLibP2P := cfg.Subsystems.EnableMarkets
		err = lr.Close()
		if err != nil {
@@ -141,7 +130,7 @@ var runCmd = &cli.Command{

		var minerapi api.StorageMiner
		stop, err := node.New(ctx,
-			node.StorageMiner(&minerapi, cfg.Subsystems),
+			node.StorageMiner(&minerapi),
			node.Override(new(dtypes.ShutdownChan), shutdownChan),
			node.Base(),
			node.Repo(r),
@@ -161,20 +150,6 @@ var runCmd = &cli.Command{
			return xerrors.Errorf("getting API endpoint: %w", err)
		}

-		if bootstrapLibP2P {
-			log.Infof("Bootstrapping libp2p network with full node")
-
-			// Bootstrap with full node
-			remoteAddrs, err := nodeApi.NetAddrsListen(ctx)
-			if err != nil {
-				return xerrors.Errorf("getting full node libp2p address: %w", err)
-			}
-
-			if err := minerapi.NetConnect(ctx, remoteAddrs); err != nil {
-				return xerrors.Errorf("connecting to full node (libp2p): %w", err)
-			}
-		}
-
		log.Infof("Remote version %s", v)

		// Instantiate the miner node handler.
File diff suppressed because it is too large
@@ -49,7 +49,6 @@ USAGE:

COMMANDS:
   restore  Initialize a lotus miner repo from a backup
-   service  Initialize a lotus miner sub-service
   help, h  Shows a list of commands or help for one command

OPTIONS:
@@ -84,23 +83,6 @@ OPTIONS:
   --help, -h  show help
```

-### lotus-miner init service
-```
-NAME:
-   lotus-miner init service - Initialize a lotus miner sub-service
-
-USAGE:
-   lotus-miner init service [command options] [backupFile]
-
-OPTIONS:
-   --config value                 config file (config.toml)
-   --nosync                       don't check full-node sync status (default: false)
-   --type value [ --type value ]  type of service to be enabled
-   --api-sealer value             sealer API info (lotus-miner auth api-info --perm=admin)
-   --api-sector-index value       sector Index API info (lotus-miner auth api-info --perm=admin)
-   --help, -h                     show help
-```
-
## lotus-miner run
```
NAME:
@@ -128,30 +128,6 @@
  #TracerSourceAuth = ""


-[Client]
-  # The maximum number of simultaneous data transfers between the client
-  # and storage providers for storage deals
-  #
-  # type: uint64
-  # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORSTORAGE
-  #SimultaneousTransfersForStorage = 20
-
-  # The maximum number of simultaneous data transfers between the client
-  # and storage providers for retrieval deals
-  #
-  # type: uint64
-  # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORRETRIEVAL
-  #SimultaneousTransfersForRetrieval = 20
-
-  # Require that retrievals perform no on-chain operations. Paid retrievals
-  # without existing payment channels with available funds will fail instead
-  # of automatically performing on-chain operations.
-  #
-  # type: bool
-  # env var: LOTUS_CLIENT_OFFCHAINRETRIEVAL
-  #OffChainRetrieval = false
-
-
[Wallet]
  # type: string
  # env var: LOTUS_WALLET_REMOTEBACKEND
@ -141,10 +141,6 @@
|
||||
# env var: LOTUS_SUBSYSTEMS_ENABLESECTORSTORAGE
|
||||
#EnableSectorStorage = true
|
||||
|
||||
# type: bool
|
||||
# env var: LOTUS_SUBSYSTEMS_ENABLEMARKETS
|
||||
#EnableMarkets = false
|
||||
|
||||
# When enabled, the sector index will reside in an external database
|
||||
# as opposed to the local KV store in the miner process
|
||||
# This is useful to allow workers to bypass the lotus miner to access sector information
|
||||
@ -188,190 +184,12 @@
|
||||
|
||||
|
||||
[Dealmaking]
|
||||
# When enabled, the miner can accept online deals
|
||||
#
|
||||
# type: bool
|
||||
# env var: LOTUS_DEALMAKING_CONSIDERONLINESTORAGEDEALS
|
||||
#ConsiderOnlineStorageDeals = true
|
||||
|
||||
# When enabled, the miner can accept offline deals
|
||||
#
|
||||
# type: bool
|
||||
# env var: LOTUS_DEALMAKING_CONSIDEROFFLINESTORAGEDEALS
|
||||
#ConsiderOfflineStorageDeals = true
|
||||
|
||||
# When enabled, the miner can accept retrieval deals
|
||||
#
|
||||
# type: bool
|
||||
# env var: LOTUS_DEALMAKING_CONSIDERONLINERETRIEVALDEALS
|
||||
#ConsiderOnlineRetrievalDeals = true
|
||||
|
||||
# When enabled, the miner can accept offline retrieval deals
|
||||
#
|
||||
# type: bool
|
||||
# env var: LOTUS_DEALMAKING_CONSIDEROFFLINERETRIEVALDEALS
|
||||
#ConsiderOfflineRetrievalDeals = true
|
||||
|
||||
# When enabled, the miner can accept verified deals
|
||||
#
|
||||
# type: bool
|
||||
# env var: LOTUS_DEALMAKING_CONSIDERVERIFIEDSTORAGEDEALS
|
||||
#ConsiderVerifiedStorageDeals = true
|
||||
|
||||
# When enabled, the miner can accept unverified deals
|
||||
#
|
||||
# type: bool
|
||||
# env var: LOTUS_DEALMAKING_CONSIDERUNVERIFIEDSTORAGEDEALS
|
||||
#ConsiderUnverifiedStorageDeals = true
|
||||
|
||||
# A list of Data CIDs to reject when making deals
|
||||
#
|
||||
# type: []cid.Cid
|
||||
# env var: LOTUS_DEALMAKING_PIECECIDBLOCKLIST
|
||||
#PieceCidBlocklist = []
|
||||
|
||||
# Maximum expected amount of time getting the deal into a sealed sector will take
|
||||
# This includes the time the deal will need to get transferred and published
|
||||
# before being assigned to a sector
|
||||
#
|
||||
# type: Duration
|
||||
# env var: LOTUS_DEALMAKING_EXPECTEDSEALDURATION
|
||||
#ExpectedSealDuration = "24h0m0s"
|
||||
|
||||
# Maximum amount of time proposed deal StartEpoch can be in future
|
||||
#
|
||||
# type: Duration
|
||||
# env var: LOTUS_DEALMAKING_MAXDEALSTARTDELAY
|
||||
#MaxDealStartDelay = "336h0m0s"
|
||||
|
||||
# When a deal is ready to publish, the amount of time to wait for more
|
||||
# deals to be ready to publish before publishing them all as a batch
|
||||
#
|
||||
# type: Duration
|
||||
# env var: LOTUS_DEALMAKING_PUBLISHMSGPERIOD
|
||||
#PublishMsgPeriod = "1h0m0s"
|
||||
|
||||
# The maximum number of deals to include in a single PublishStorageDeals
|
||||
# message
|
||||
#
|
||||
# type: uint64
|
||||
# env var: LOTUS_DEALMAKING_MAXDEALSPERPUBLISHMSG
|
||||
#MaxDealsPerPublishMsg = 8
|
||||
|
||||
# The maximum collateral that the provider will put up against a deal,
|
||||
# as a multiplier of the minimum collateral bound
|
||||
#
|
||||
# type: uint64
|
||||
# env var: LOTUS_DEALMAKING_MAXPROVIDERCOLLATERALMULTIPLIER
|
||||
#MaxProviderCollateralMultiplier = 2
|
||||
|
||||
# The maximum allowed disk usage size in bytes of staging deals not yet
|
||||
# passed to the sealing node by the markets service. 0 is unlimited.
|
||||
#
|
||||
# type: int64
|
||||
# env var: LOTUS_DEALMAKING_MAXSTAGINGDEALSBYTES
|
||||
#MaxStagingDealsBytes = 0
|
||||
|
||||
# The maximum number of parallel online data transfers for storage deals
|
||||
#
|
||||
# type: uint64
|
||||
# env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGE
|
||||
#SimultaneousTransfersForStorage = 20
|
||||
|
||||
# The maximum number of simultaneous data transfers from any single client
|
||||
# for storage deals.
|
||||
# Unset by default (0), and values higher than SimultaneousTransfersForStorage
|
||||
# will have no effect; i.e. the total number of simultaneous data transfers
|
||||
# across all storage clients is bound by SimultaneousTransfersForStorage
|
||||
# regardless of this number.
|
||||
#
|
||||
# type: uint64
|
||||
# env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGEPERCLIENT
|
||||
#SimultaneousTransfersForStoragePerClient = 0
|
||||
|
||||
# The maximum number of parallel online data transfers for retrieval deals
|
||||
#
|
||||
# type: uint64
|
||||
# env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORRETRIEVAL
|
||||
#SimultaneousTransfersForRetrieval = 20
|
||||
|
||||
# Minimum start epoch buffer to give time for sealing of sector with deal.
|
||||
#
|
||||
# type: uint64
|
||||
# env var: LOTUS_DEALMAKING_STARTEPOCHSEALINGBUFFER
|
||||
#StartEpochSealingBuffer = 480
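For orientation, the default buffer of 480 epochs equals 4 hours at Filecoin mainnet's 30-second epoch time:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const epoch = 30 * time.Second // Filecoin mainnet epoch duration
	fmt.Println(480 * epoch)       // 4h0m0s
}
```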
# A command used for fine-grained evaluation of storage deals
# see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details
#
# type: string
# env var: LOTUS_DEALMAKING_FILTER
#Filter = ""

# A command used for fine-grained evaluation of retrieval deals
# see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details
#
# type: string
# env var: LOTUS_DEALMAKING_RETRIEVALFILTER
#RetrievalFilter = ""

[Dealmaking.RetrievalPricing]
# env var: LOTUS_DEALMAKING_RETRIEVALPRICING_STRATEGY
#Strategy = "default"

[Dealmaking.RetrievalPricing.Default]
# env var: LOTUS_DEALMAKING_RETRIEVALPRICING_DEFAULT_VERIFIEDDEALSFREETRANSFER
#VerifiedDealsFreeTransfer = true

[Dealmaking.RetrievalPricing.External]
# env var: LOTUS_DEALMAKING_RETRIEVALPRICING_EXTERNAL_PATH
#Path = ""


[IndexProvider]
# Enable set whether to enable indexing announcement to the network and expose endpoints that
# allow indexer nodes to process announcements. Enabled by default.
#
# type: bool
# env var: LOTUS_INDEXPROVIDER_ENABLE
#Enable = true

# EntriesCacheCapacity sets the maximum capacity to use for caching the indexing advertisement
# entries. Defaults to 1024 if not specified. The cache is evicted using LRU policy. The
# maximum storage used by the cache is a factor of EntriesCacheCapacity, EntriesChunkSize and
# the length of multihashes being advertised. For example, advertising 128-bit long multihashes
# with the default EntriesCacheCapacity, and EntriesChunkSize means the cache size can grow to
# 256MiB when full.
#
# type: int
# env var: LOTUS_INDEXPROVIDER_ENTRIESCACHECAPACITY
#EntriesCacheCapacity = 1024
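The "256MiB when full" figure quoted above follows directly from the defaults; a quick check under the stated assumption of 128-bit (16-byte) multihashes:

```go
package main

import "fmt"

func main() {
	const (
		entriesCacheCapacity = 1024  // default cached advertisement entries
		entriesChunkSize     = 16384 // default multihashes per entries chunk
		multihashBytes       = 16    // 128-bit multihashes
	)
	total := entriesCacheCapacity * entriesChunkSize * multihashBytes
	fmt.Printf("%d bytes = %d MiB\n", total, total>>20) // 268435456 bytes = 256 MiB
}
```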
# EntriesChunkSize sets the maximum number of multihashes to include in a single entries chunk.
# Defaults to 16384 if not specified. Note that chunks are chained together for indexing
# advertisements that include more multihashes than the configured EntriesChunkSize.
#
# type: int
# env var: LOTUS_INDEXPROVIDER_ENTRIESCHUNKSIZE
#EntriesChunkSize = 16384

# TopicName sets the topic name on which the changes to the advertised content are announced.
# If not explicitly specified, the topic name is automatically inferred from the network name
# in following format: '/indexer/ingest/<network-name>'
# Defaults to empty, which implies the topic name is inferred from network name.
#
# type: string
# env var: LOTUS_INDEXPROVIDER_TOPICNAME
#TopicName = ""

# PurgeCacheOnStart sets whether to clear any cached entries chunks when the provider engine
# starts. By default, the cache is rehydrated from previously cached entries stored in
# datastore if any is present.
#
# type: bool
# env var: LOTUS_INDEXPROVIDER_PURGECACHEONSTART
#PurgeCacheOnStart = false


[Proving]
# Maximum number of sector checks to run in parallel. (0 = unlimited)
@ -896,63 +714,6 @@
#DisableWorkerFallback = false


[DAGStore]
# Path to the dagstore root directory. This directory contains three
# subdirectories, which can be symlinked to alternative locations if
# need be:
# - ./transients: caches unsealed deals that have been fetched from the
# storage subsystem for serving retrievals.
# - ./indices: stores shard indices.
# - ./datastore: holds the KV store tracking the state of every shard
# known to the DAG store.
# Default value: <LOTUS_MARKETS_PATH>/dagstore (split deployment) or
# <LOTUS_MINER_PATH>/dagstore (monolith deployment)
#
# type: string
# env var: LOTUS_DAGSTORE_ROOTDIR
#RootDir = ""

# The maximum amount of indexing jobs that can run simultaneously.
# 0 means unlimited.
# Default value: 5.
#
# type: int
# env var: LOTUS_DAGSTORE_MAXCONCURRENTINDEX
#MaxConcurrentIndex = 5

# The maximum amount of unsealed deals that can be fetched simultaneously
# from the storage subsystem. 0 means unlimited.
# Default value: 0 (unlimited).
#
# type: int
# env var: LOTUS_DAGSTORE_MAXCONCURRENTREADYFETCHES
#MaxConcurrentReadyFetches = 0

# The maximum amount of unseals that can be processed simultaneously
# from the storage subsystem. 0 means unlimited.
# Default value: 0 (unlimited).
#
# type: int
# env var: LOTUS_DAGSTORE_MAXCONCURRENTUNSEALS
#MaxConcurrentUnseals = 5

# The maximum number of simultaneous inflight API calls to the storage
# subsystem.
# Default value: 100.
#
# type: int
# env var: LOTUS_DAGSTORE_MAXCONCURRENCYSTORAGECALLS
#MaxConcurrencyStorageCalls = 100

# The time between calls to periodic dagstore GC, in time.Duration string
# representation, e.g. 1m, 5m, 1h.
# Default value: 1 minute.
#
# type: Duration
# env var: LOTUS_DAGSTORE_GCINTERVAL
#GCInterval = "1m0s"
[HarmonyDB]
# HOSTS is a list of hostnames to nodes running YugabyteDB
# in a cluster. Only 1 is required

132
go.mod
@ -29,7 +29,6 @@ require (
github.com/elastic/gosigar v0.14.2
github.com/etclabscore/go-openrpc-reflect v0.0.36
github.com/fatih/color v1.15.0
github.com/filecoin-project/dagstore v0.5.2
github.com/filecoin-project/filecoin-ffi v0.30.4-0.20220519234331-bfd1f5f9fe38
github.com/filecoin-project/go-address v1.1.0
github.com/filecoin-project/go-amt-ipld/v4 v4.3.0
@ -38,11 +37,9 @@ require (
github.com/filecoin-project/go-commp-utils v0.1.3
github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837
github.com/filecoin-project/go-crypto v0.0.1
github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc8
github.com/filecoin-project/go-fil-commcid v0.1.0
github.com/filecoin-project/go-fil-markets v1.28.3
github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0
github.com/filecoin-project/go-jsonrpc v0.4.0
github.com/filecoin-project/go-jsonrpc v0.3.2
github.com/filecoin-project/go-padreader v0.0.1
github.com/filecoin-project/go-paramfetch v0.0.4
github.com/filecoin-project/go-state-types v0.14.0-dev
@ -65,20 +62,20 @@ require (
github.com/go-openapi/spec v0.19.11
github.com/golang/mock v1.6.0
github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.6.0
github.com/google/uuid v1.5.0
github.com/gorilla/mux v1.8.1
github.com/gorilla/websocket v1.5.1
github.com/gregdhill/go-openrpc v0.0.0-20220114144539-ae6f44720487
github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026
github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/golang-lru/arc/v2 v2.0.7
github.com/hashicorp/golang-lru/arc/v2 v2.0.5
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab
github.com/invopop/jsonschema v0.12.0
github.com/ipfs/bbloom v0.0.4
github.com/ipfs/boxo v0.20.0
github.com/ipfs/boxo v0.18.0
github.com/ipfs/go-block-format v0.2.0
github.com/ipfs/go-cid v0.4.1
github.com/ipfs/go-cidutil v0.1.0
@ -87,29 +84,24 @@ require (
github.com/ipfs/go-ds-leveldb v0.5.0
github.com/ipfs/go-ds-measure v0.2.0
github.com/ipfs/go-fs-lock v0.0.7
github.com/ipfs/go-graphsync v0.17.0
github.com/ipfs/go-ipfs-blocksutil v0.0.1
github.com/ipfs/go-ipld-cbor v0.1.0
github.com/ipfs/go-ipld-format v0.6.0
github.com/ipfs/go-log/v2 v2.5.1
github.com/ipfs/go-metrics-interface v0.0.1
github.com/ipfs/go-metrics-prometheus v0.0.2
github.com/ipfs/go-unixfsnode v1.9.0
github.com/ipld/go-car v0.6.2
github.com/ipld/go-car v0.6.1
github.com/ipld/go-car/v2 v2.13.1
github.com/ipld/go-codec-dagpb v1.6.0
github.com/ipld/go-ipld-prime v0.21.0
github.com/ipld/go-ipld-selector-text-lite v0.0.1
github.com/ipni/go-libipni v0.0.8
github.com/ipni/index-provider v0.12.0
github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438
github.com/kelseyhightower/envconfig v1.4.0
github.com/klauspost/compress v1.17.8
github.com/klauspost/compress v1.17.6
github.com/koalacxr/quantile v0.0.1
github.com/libp2p/go-buffer-pool v0.1.0
github.com/libp2p/go-libp2p v0.34.1
github.com/libp2p/go-libp2p v0.33.2
github.com/libp2p/go-libp2p-kad-dht v0.25.2
github.com/libp2p/go-libp2p-pubsub v0.11.0
github.com/libp2p/go-libp2p-pubsub v0.10.1
github.com/libp2p/go-libp2p-record v0.2.0
github.com/libp2p/go-libp2p-routing-helpers v0.7.3
github.com/libp2p/go-maddr-filter v0.1.0
@ -120,14 +112,14 @@ require (
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1
github.com/mitchellh/go-homedir v1.1.0
github.com/multiformats/go-base32 v0.1.0
github.com/multiformats/go-multiaddr v0.12.4
github.com/multiformats/go-multiaddr v0.12.3
github.com/multiformats/go-multiaddr-dns v0.3.1
github.com/multiformats/go-multicodec v0.9.0
github.com/multiformats/go-multihash v0.2.3
github.com/multiformats/go-varint v0.0.7
github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333
github.com/polydawn/refmt v0.89.0
github.com/prometheus/client_golang v1.19.1
github.com/prometheus/client_golang v1.18.0
github.com/puzpuzpuz/xsync/v2 v2.4.0
github.com/raulk/clock v1.1.0
github.com/raulk/go-watchdog v1.3.0
@ -145,21 +137,21 @@ require (
github.com/zondax/ledger-filecoin-go v0.11.1
github.com/zyedidia/generic v1.2.1
go.opencensus.io v0.24.0
go.opentelemetry.io/otel v1.26.0
go.opentelemetry.io/otel v1.21.0
go.opentelemetry.io/otel/bridge/opencensus v0.39.0
go.opentelemetry.io/otel/exporters/jaeger v1.14.0
go.opentelemetry.io/otel/sdk v1.26.0
go.opentelemetry.io/otel/sdk v1.21.0
go.uber.org/atomic v1.11.0
go.uber.org/fx v1.21.1
go.uber.org/fx v1.20.1
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
golang.org/x/crypto v0.23.0
golang.org/x/net v0.25.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.20.0
golang.org/x/term v0.20.0
golang.org/x/crypto v0.21.0
golang.org/x/net v0.23.0
golang.org/x/sync v0.6.0
golang.org/x/sys v0.18.0
golang.org/x/term v0.18.0
golang.org/x/time v0.5.0
golang.org/x/tools v0.21.0
golang.org/x/tools v0.18.0
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028
gopkg.in/cheggaaa/pb.v1 v1.0.28
gotest.tools v2.2.0+incompatible
@ -172,14 +164,12 @@ require (
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/akavel/rsrc v0.8.0 // indirect
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
github.com/bahlo/generic-list-go v0.2.0 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bep/debounce v1.2.1 // indirect
github.com/buger/jsonparser v1.1.1 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cilium/ebpf v0.9.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 // indirect
@ -187,7 +177,7 @@ require (
github.com/daaku/go.zipexe v1.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/dgraph-io/ristretto v0.1.1 // indirect
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
github.com/drand/kyber-bls12381 v0.3.1 // indirect
@ -195,8 +185,6 @@ require (
github.com/etclabscore/go-jsonschema-walk v0.0.6 // indirect
github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 // indirect
github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 // indirect
github.com/filecoin-project/go-ds-versioning v0.1.2 // indirect
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 // indirect
github.com/filecoin-project/go-hamt-ipld v0.1.5 // indirect
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 // indirect
github.com/flynn/noise v1.1.0 // indirect
@ -210,55 +198,50 @@ require (
github.com/go-openapi/jsonpointer v0.19.3 // indirect
github.com/go-openapi/jsonreference v0.19.4 // indirect
github.com/go-openapi/swag v0.19.11 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/glog v1.2.0 // indirect
github.com/golang/glog v1.1.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/google/pprof v0.0.0-20240509144519-723abb6459b7 // indirect
github.com/hannahhoward/cbor-gen-for v0.0.0-20230214144701-5d17c9d5243c // indirect
github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/iancoleman/orderedmap v0.1.0 // indirect
github.com/ipfs/go-bitfield v1.1.0 // indirect
github.com/ipfs/go-blockservice v0.5.2 // indirect
github.com/ipfs/go-ipfs-blockstore v1.3.1 // indirect
github.com/ipfs/go-blockservice v0.5.1 // indirect
github.com/ipfs/go-ipfs-blockstore v1.3.0 // indirect
github.com/ipfs/go-ipfs-delay v0.0.1 // indirect
github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect
github.com/ipfs/go-ipfs-exchange-interface v0.2.1 // indirect
github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect
github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect
github.com/ipfs/go-ipfs-pq v0.0.3 // indirect
github.com/ipfs/go-ipfs-util v0.0.3 // indirect
github.com/ipfs/go-ipld-legacy v0.2.1 // indirect
github.com/ipfs/go-libipfs v0.7.0 // indirect
github.com/ipfs/go-log v1.0.5 // indirect
github.com/ipfs/go-merkledag v0.11.0 // indirect
github.com/ipfs/go-peertaskqueue v0.8.1 // indirect
github.com/ipfs/go-verifcid v0.0.3 // indirect
github.com/ipld/go-ipld-adl-hamt v0.0.0-20220616142416-9004dbd839e0 // indirect
github.com/ipfs/go-verifcid v0.0.2 // indirect
github.com/ipld/go-codec-dagpb v1.6.0 // indirect
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect
github.com/jackc/pgx/v5 v5.4.1 // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/jessevdk/go-flags v1.4.0 // indirect
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/kilic/bls12-381 v0.1.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/koron/go-ssdp v0.0.4 // indirect
github.com/libp2p/go-cidranger v1.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-libp2p-gostream v0.6.0 // indirect
github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect
github.com/libp2p/go-nat v0.2.0 // indirect
github.com/libp2p/go-netroute v0.2.1 // indirect
@ -270,7 +253,7 @@ require (
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/miekg/dns v1.1.59 // indirect
github.com/miekg/dns v1.1.58 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
@ -281,43 +264,26 @@ require (
github.com/multiformats/go-multistream v0.5.0 // indirect
github.com/nikkolasg/hexjson v0.1.0 // indirect
github.com/nkovacs/streamquote v1.0.0 // indirect
github.com/onsi/ginkgo/v2 v2.17.3 // indirect
github.com/onsi/ginkgo/v2 v2.15.0 // indirect
github.com/opencontainers/runtime-spec v1.2.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect
github.com/pion/datachannel v1.5.6 // indirect
github.com/pion/dtls/v2 v2.2.11 // indirect
github.com/pion/ice/v2 v2.3.24 // indirect
github.com/pion/interceptor v0.1.29 // indirect
github.com/pion/logging v0.2.2 // indirect
github.com/pion/mdns v0.0.12 // indirect
github.com/pion/randutil v0.1.0 // indirect
github.com/pion/rtcp v1.2.14 // indirect
github.com/pion/rtp v1.8.6 // indirect
github.com/pion/sctp v1.8.16 // indirect
github.com/pion/sdp/v3 v3.0.9 // indirect
github.com/pion/srtp/v2 v2.0.18 // indirect
github.com/pion/stun v0.6.1 // indirect
github.com/pion/transport/v2 v2.2.5 // indirect
github.com/pion/turn/v2 v2.1.6 // indirect
github.com/pion/webrtc/v3 v3.2.40 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.53.0 // indirect
github.com/prometheus/procfs v0.15.0 // indirect
github.com/prometheus/client_model v0.6.0 // indirect
github.com/prometheus/common v0.47.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/statsd_exporter v0.22.7 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/quic-go v0.44.0 // indirect
github.com/quic-go/webtransport-go v0.8.0 // indirect
github.com/quic-go/quic-go v0.42.0 // indirect
github.com/quic-go/webtransport-go v0.6.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shirou/gopsutil v2.18.12+incompatible // indirect
github.com/sirupsen/logrus v1.9.2 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/tidwall/gjson v1.14.4 // indirect
github.com/twmb/murmur3 v1.1.6 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.0.1 // indirect
github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect
@ -329,23 +295,23 @@ require (
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/zondax/hid v0.9.2 // indirect
github.com/zondax/ledger-go v0.14.3 // indirect
go.opentelemetry.io/otel/metric v1.26.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect
go.opentelemetry.io/otel/sdk/metric v0.39.0 // indirect
go.opentelemetry.io/otel/trace v1.26.0 // indirect
go.opentelemetry.io/otel/trace v1.21.0 // indirect
go.uber.org/dig v1.17.1 // indirect
go.uber.org/mock v0.4.0 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/text v0.15.0 // indirect
gonum.org/v1/gonum v0.15.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect
google.golang.org/grpc v1.64.0 // indirect
google.golang.org/protobuf v1.34.1 // indirect
golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect
golang.org/x/mod v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
gonum.org/v1/gonum v0.14.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 // indirect
google.golang.org/grpc v1.60.1 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
lukechampine.com/blake3 v1.3.0 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
)

// https://github.com/magik6k/reflink/commit/cff5a40f3eeca17f44fc95a57ff3878e5ac761dc

@ -116,11 +116,11 @@ func (ts *apiSuite) testConnectTwo(t *testing.T) {
return len(peerIDs)
}

require.Equal(t, countPeerIDs(peers), 2, "node one doesn't have 2 peers")
require.Equal(t, countPeerIDs(peers), 1, "node one doesn't have 1 peer")

peers, err = two.NetPeers(ctx)
require.NoError(t, err)
require.Equal(t, countPeerIDs(peers), 2, "node one doesn't have 2 peers")
require.Equal(t, countPeerIDs(peers), 1, "node one doesn't have 1 peer")
}

func (ts *apiSuite) testSearchMsg(t *testing.T) {

@ -50,8 +50,6 @@ import (
"github.com/filecoin-project/lotus/gateway"
"github.com/filecoin-project/lotus/genesis"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/markets/idxprov"
"github.com/filecoin-project/lotus/markets/idxprov/idxprov_test"
lotusminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/config"
@ -603,12 +601,10 @@ func (n *Ensemble) Start() *Ensemble {
n.t.Fatalf("invalid config from repo, got: %T", c)
}
cfg.Common.API.RemoteListenAddress = m.RemoteListener.Addr().String()
cfg.Subsystems.EnableMarkets = m.options.subsystems.Has(SMarkets)
cfg.Subsystems.EnableMining = m.options.subsystems.Has(SMining)
cfg.Subsystems.EnableSealing = m.options.subsystems.Has(SSealing)
cfg.Subsystems.EnableSectorStorage = m.options.subsystems.Has(SSectorStorage)
cfg.Subsystems.EnableSectorIndexDB = m.options.subsystems.Has(SHarmony)
cfg.Dealmaking.MaxStagingDealsBytes = m.options.maxStagingDealsBytes

if m.options.mainMiner != nil {
token, err := m.options.mainMiner.FullNode.AuthNew(ctx, api.AllPermissions)
@ -694,7 +690,7 @@ func (n *Ensemble) Start() *Ensemble {
m.FullNode = &minerCopy

opts := []node.Option{
node.StorageMiner(&m.StorageMiner, cfg.Subsystems),
node.StorageMiner(&m.StorageMiner),
node.Base(),
node.Repo(r),
node.Test(),
@ -737,13 +733,6 @@ func (n *Ensemble) Start() *Ensemble {
}
}),
}

if m.options.subsystems.Has(SMarkets) {
opts = append(opts,
node.Override(new(idxprov.MeshCreator), idxprov_test.NewNoopMeshCreator),
)
}

// append any node builder options.
opts = append(opts, m.options.extraNodeOpts...)

@ -916,15 +905,6 @@ func (n *Ensemble) Start() *Ensemble {

// InterconnectAll connects all miners and full nodes to one another.
func (n *Ensemble) InterconnectAll() *Ensemble {
// connect full nodes to miners.
for _, from := range n.active.fullnodes {
for _, to := range n.active.miners {
// []*TestMiner to []api.CommonAPI type coercion not possible
// so cannot use variadic form.
n.Connect(from, to)
}
}

// connect full nodes between each other, skipping ourselves.
last := len(n.active.fullnodes) - 1
for i, from := range n.active.fullnodes {

@ -2,7 +2,6 @@ package kit

import (
"testing"
"time"
)

// EnsembleMinimal creates and starts an Ensemble with a single full node and a single miner.
@ -37,29 +36,6 @@ func EnsembleWorker(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMine
return &full, &miner, &worker, ens
}

func EnsembleWithMinerAndMarketNodes(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMiner, *TestMiner, *Ensemble) {
eopts, nopts := siftOptions(t, opts)

var (
fullnode TestFullNode
main, market TestMiner
)

mainNodeOpts := []NodeOpt{WithSubsystems(SSealing, SSectorStorage, SMining), DisableLibp2p()}
mainNodeOpts = append(mainNodeOpts, nopts...)

blockTime := 100 * time.Millisecond
ens := NewEnsemble(t, eopts...).FullNode(&fullnode, nopts...).Miner(&main, &fullnode, mainNodeOpts...).Start()
ens.BeginMining(blockTime)

marketNodeOpts := []NodeOpt{OwnerAddr(fullnode.DefaultKey), MainMiner(&main), WithSubsystems(SMarkets)}
marketNodeOpts = append(marketNodeOpts, nopts...)

ens.Miner(&market, &fullnode, marketNodeOpts...).Start().Connect(market, fullnode)

return &fullnode, &main, &market, ens
}

// EnsembleTwoOne creates and starts an Ensemble with two full nodes and one miner.
// It does not interconnect nodes nor does it begin mining.
//

@ -32,8 +32,7 @@ import (
type MinerSubsystem int

const (
SMarkets MinerSubsystem = 1 << iota
SMining
SMining MinerSubsystem = 1 << iota
SSealing
SSectorStorage


@ -84,7 +84,6 @@ type NodeOpt func(opts *nodeOpts) error

func WithAllSubsystems() NodeOpt {
return func(opts *nodeOpts) error {
opts.subsystems = opts.subsystems.Add(SMarkets)
opts.subsystems = opts.subsystems.Add(SMining)
opts.subsystems = opts.subsystems.Add(SSealing)
opts.subsystems = opts.subsystems.Add(SSectorStorage)

@ -1,159 +0,0 @@
package unixfs

import (
"context"
"fmt"
"io"
"os"

"github.com/ipfs/boxo/blockservice"
bstore "github.com/ipfs/boxo/blockstore"
chunker "github.com/ipfs/boxo/chunker"
offline "github.com/ipfs/boxo/exchange/offline"
"github.com/ipfs/boxo/files"
"github.com/ipfs/boxo/ipld/merkledag"
"github.com/ipfs/boxo/ipld/unixfs/importer/balanced"
ihelper "github.com/ipfs/boxo/ipld/unixfs/importer/helpers"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-cidutil"
ipld "github.com/ipfs/go-ipld-format"
mh "github.com/multiformats/go-multihash"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-fil-markets/stores"

"github.com/filecoin-project/lotus/build"
)

var DefaultHashFunction = uint64(mh.BLAKE2B_MIN + 31)

func CidBuilder() (cid.Builder, error) {
prefix, err := merkledag.PrefixForCidVersion(1)
if err != nil {
return nil, fmt.Errorf("failed to initialize UnixFS CID Builder: %w", err)
}
prefix.MhType = DefaultHashFunction
b := cidutil.InlineBuilder{
Builder: prefix,
Limit: 126,
}
return b, nil
}

// CreateFilestore takes a standard file whose path is src, forms a UnixFS DAG, and
// writes a CARv2 file with positional mapping (backed by the go-filestore library).
func CreateFilestore(ctx context.Context, srcPath string, dstPath string) (cid.Cid, error) {
// This method uses a two-phase approach with a staging CAR blockstore and
// a final CAR blockstore.
//
// This is necessary because of https://github.com/ipld/go-car/issues/196
//
// TODO: do we need to chunk twice? Isn't the first output already in the
// right order? Can't we just copy the CAR file and replace the header?

src, err := os.Open(srcPath)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to open input file: %w", err)
}
defer src.Close() //nolint:errcheck

stat, err := src.Stat()
if err != nil {
return cid.Undef, xerrors.Errorf("failed to stat file: %w", err)
}

file, err := files.NewReaderPathFile(srcPath, src, stat)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create reader path file: %w", err)
}

f, err := os.CreateTemp("", "")
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create temp file: %w", err)
}
_ = f.Close() // close; we only want the path.

tmp := f.Name()
defer os.Remove(tmp) //nolint:errcheck

// Step 1. Compute the UnixFS DAG and write it to a CARv2 file to get
// the root CID of the DAG.
fstore, err := stores.ReadWriteFilestore(tmp)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create temporary filestore: %w", err)
}

finalRoot1, err := Build(ctx, file, fstore, true)
if err != nil {
_ = fstore.Close()
return cid.Undef, xerrors.Errorf("failed to import file to store to compute root: %w", err)
}

if err := fstore.Close(); err != nil {
return cid.Undef, xerrors.Errorf("failed to finalize car filestore: %w", err)
}

// Step 2. We now have the root of the UnixFS DAG, and we can write the
// final CAR for real under `dst`.
bs, err := stores.ReadWriteFilestore(dstPath, finalRoot1)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create a carv2 read/write filestore: %w", err)
}

// rewind file to the beginning.
if _, err := src.Seek(0, 0); err != nil {
return cid.Undef, xerrors.Errorf("failed to rewind file: %w", err)
}

finalRoot2, err := Build(ctx, file, bs, true)
if err != nil {
_ = bs.Close()
return cid.Undef, xerrors.Errorf("failed to create UnixFS DAG with carv2 blockstore: %w", err)
}

if err := bs.Close(); err != nil {
return cid.Undef, xerrors.Errorf("failed to finalize car blockstore: %w", err)
}

if finalRoot1 != finalRoot2 {
return cid.Undef, xerrors.New("roots do not match")
}

return finalRoot1, nil
}
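A hedged usage sketch of the function being deleted here; the import path and file paths are assumptions for illustration, not confirmed by this diff:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/filecoin-project/lotus/lib/unixfs" // assumed import path
)

func main() {
	ctx := context.Background()
	// The destination receives a CARv2 file whose data sections reference
	// the source file positionally (a filestore), so the source must stay
	// in place at the same path afterwards.
	root, err := unixfs.CreateFilestore(ctx, "/tmp/input.bin", "/tmp/output.car")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("payload root CID:", root)
}
```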
// Build builds a UnixFS DAG out of the supplied reader,
// and imports the DAG into the supplied service.
func Build(ctx context.Context, reader io.Reader, into bstore.Blockstore, filestore bool) (cid.Cid, error) {
b, err := CidBuilder()
if err != nil {
return cid.Undef, err
}

bsvc := blockservice.New(into, offline.Exchange(into))
dags := merkledag.NewDAGService(bsvc)
bufdag := ipld.NewBufferedDAG(ctx, dags)

params := ihelper.DagBuilderParams{
Maxlinks: build.UnixfsLinksPerLevel,
RawLeaves: true,
CidBuilder: b,
Dagserv: bufdag,
NoCopy: filestore,
}

db, err := params.New(chunker.NewSizeSplitter(reader, int64(build.UnixfsChunkSize)))
if err != nil {
return cid.Undef, err
}
nd, err := balanced.Layout(db)
if err != nil {
return cid.Undef, err
}

if err := bufdag.Commit(); err != nil {
return cid.Undef, err
}

return nd.Cid(), nil
}
@ -1,128 +0,0 @@
// stm: #unit
package unixfs

import (
"bytes"
"context"
"io"
"os"
"strings"
"testing"

"github.com/ipfs/boxo/blockservice"
offline "github.com/ipfs/boxo/exchange/offline"
"github.com/ipfs/boxo/files"
"github.com/ipfs/boxo/ipld/merkledag"
unixfile "github.com/ipfs/boxo/ipld/unixfs/file"
"github.com/ipfs/go-cid"
carv2 "github.com/ipld/go-car/v2"
"github.com/ipld/go-car/v2/blockstore"
"github.com/stretchr/testify/require"

"github.com/filecoin-project/go-fil-markets/stores"
)

// This test uses a full "dense" CARv2, and not a filestore (positional mapping).
func TestRoundtripUnixFS_Dense(t *testing.T) {
// stm: @CLIENT_DATA_IMPORT_002
ctx := context.Background()

inputPath, inputContents := genInputFile(t)
defer os.Remove(inputPath) //nolint:errcheck

carv2File := newTmpFile(t)
defer os.Remove(carv2File) //nolint:errcheck

// import a file to a Unixfs DAG using a CARv2 read/write blockstore.
bs, err := blockstore.OpenReadWrite(carv2File, nil,
carv2.ZeroLengthSectionAsEOF(true),
blockstore.UseWholeCIDs(true))
require.NoError(t, err)

root, err := Build(ctx, bytes.NewBuffer(inputContents), bs, false)
require.NoError(t, err)
require.NotEqual(t, cid.Undef, root)
require.NoError(t, bs.Finalize())

// reconstruct the file.
readOnly, err := blockstore.OpenReadOnly(carv2File,
carv2.ZeroLengthSectionAsEOF(true),
blockstore.UseWholeCIDs(true))
require.NoError(t, err)
defer readOnly.Close() //nolint:errcheck

dags := merkledag.NewDAGService(blockservice.New(readOnly, offline.Exchange(readOnly)))

nd, err := dags.Get(ctx, root)
require.NoError(t, err)

file, err := unixfile.NewUnixfsFile(ctx, dags, nd)
require.NoError(t, err)

tmpOutput := newTmpFile(t)
defer os.Remove(tmpOutput) //nolint:errcheck
require.NoError(t, files.WriteTo(file, tmpOutput))

// ensure contents of the initial input file and the output file are identical.
fo, err := os.Open(tmpOutput)
require.NoError(t, err)
bz2, err := io.ReadAll(fo)
require.NoError(t, err)
require.NoError(t, fo.Close())
require.Equal(t, inputContents, bz2)
}

func TestRoundtripUnixFS_Filestore(t *testing.T) {
// stm: @CLIENT_DATA_IMPORT_001
ctx := context.Background()

inputPath, inputContents := genInputFile(t)
defer os.Remove(inputPath) //nolint:errcheck

dst := newTmpFile(t)
defer os.Remove(dst) //nolint:errcheck

root, err := CreateFilestore(ctx, inputPath, dst)
require.NoError(t, err)
require.NotEqual(t, cid.Undef, root)

// convert the CARv2 to a normal file again and ensure the contents match
fs, err := stores.ReadOnlyFilestore(dst)
require.NoError(t, err)
defer fs.Close() //nolint:errcheck

dags := merkledag.NewDAGService(blockservice.New(fs, offline.Exchange(fs)))

nd, err := dags.Get(ctx, root)
require.NoError(t, err)

file, err := unixfile.NewUnixfsFile(ctx, dags, nd)
require.NoError(t, err)

tmpOutput := newTmpFile(t)
defer os.Remove(tmpOutput) //nolint:errcheck
require.NoError(t, files.WriteTo(file, tmpOutput))

// ensure contents of the initial input file and the output file are identical.
fo, err := os.Open(tmpOutput)
require.NoError(t, err)
bz2, err := io.ReadAll(fo)
require.NoError(t, err)
require.NoError(t, fo.Close())
require.Equal(t, inputContents, bz2)
}

// creates a new tempdir each time, guaranteeing uniqueness
func newTmpFile(t *testing.T) string {
return t.TempDir() + string(os.PathSeparator) + "tmp"
}

func genInputFile(t *testing.T) (filepath string, contents []byte) {
s := strings.Repeat("abcde", 100)
tmp, err := os.CreateTemp("", "")
require.NoError(t, err)
_, err = io.Copy(tmp, strings.NewReader(s))
require.NoError(t, err)
require.NoError(t, tmp.Close())
return tmp.Name(), []byte(s)
}

@ -1,34 +0,0 @@
package dagstore

import (
"context"
"io"

bstore "github.com/ipfs/boxo/blockstore"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"

"github.com/filecoin-project/dagstore"
)

// Blockstore promotes a dagstore.ReadBlockstore to a full closeable Blockstore,
// stubbing out the write methods with erroring implementations.
type Blockstore struct {
dagstore.ReadBlockstore
io.Closer
}

var _ bstore.Blockstore = (*Blockstore)(nil)

func (b *Blockstore) DeleteBlock(context.Context, cid.Cid) error {
return xerrors.Errorf("DeleteBlock called but not implemented")
}

func (b *Blockstore) Put(context.Context, blocks.Block) error {
return xerrors.Errorf("Put called but not implemented")
}

func (b *Blockstore) PutMany(context.Context, []blocks.Block) error {
return xerrors.Errorf("PutMany called but not implemented")
}
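A sketch of how this wrapper is meant to be used: wire the read-only blockstore obtained from a DAG-store shard accessor into the struct, so callers get the standard interface while writes fail loudly. The accessor method names are assumptions based on the filecoin-project/dagstore API:

```go
// In the same package as Blockstore above: promote a shard accessor's
// read-only view to a full bstore.Blockstore.
func blockstoreFromShard(acc *dagstore.ShardAccessor) (*Blockstore, error) {
	rbs, err := acc.Blockstore() // read-only blockstore over the shard's CAR (assumed signature)
	if err != nil {
		return nil, err
	}
	// Reads hit the shard; Put/PutMany/DeleteBlock return errors instead of
	// silently mutating an immutable store. Closing closes the accessor.
	return &Blockstore{ReadBlockstore: rbs, Closer: acc}, nil
}
```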
Binary file not shown.
@ -1,205 +0,0 @@
package dagstore

import (
"context"
"fmt"

"github.com/ipfs/go-cid"
"golang.org/x/xerrors"

"github.com/filecoin-project/dagstore/mount"
"github.com/filecoin-project/dagstore/throttle"
"github.com/filecoin-project/go-fil-markets/piecestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/shared"
"github.com/filecoin-project/go-state-types/abi"
)

//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_lotus_accessor.go -package=mock_dagstore . MinerAPI

type MinerAPI interface {
FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (mount.Reader, error)
GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error)
IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error)
Start(ctx context.Context) error
}

type SectorAccessor interface {
retrievalmarket.SectorAccessor

UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error)
}

type minerAPI struct {
pieceStore piecestore.PieceStore
sa SectorAccessor
throttle throttle.Throttler
unsealThrottle throttle.Throttler
readyMgr *shared.ReadyManager
}

var _ MinerAPI = (*minerAPI)(nil)

func NewMinerAPI(store piecestore.PieceStore, sa SectorAccessor, concurrency int, unsealConcurrency int) MinerAPI {
var unsealThrottle throttle.Throttler
if unsealConcurrency == 0 {
unsealThrottle = throttle.Noop()
} else {
unsealThrottle = throttle.Fixed(unsealConcurrency)
}
return &minerAPI{
pieceStore: store,
sa: sa,
throttle: throttle.Fixed(concurrency),
unsealThrottle: unsealThrottle,
readyMgr: shared.NewReadyManager(),
}
}
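The throttlers above bound concurrent access to the storage subsystem. A minimal sketch of the assumed semantics of throttle.Fixed(n), written as a channel semaphore (the real library may differ in detail):

```go
package throttle

import "context"

// fixed allows at most cap(slots) concurrent executions of Do.
type fixed struct{ slots chan struct{} }

func Fixed(n int) *fixed { return &fixed{slots: make(chan struct{}, n)} }

func (t *fixed) Do(ctx context.Context, fn func(context.Context) error) error {
	select {
	case t.slots <- struct{}{}: // acquire a slot, or...
	case <-ctx.Done(): // ...give up if the context is cancelled first
		return ctx.Err()
	}
	defer func() { <-t.slots }() // release the slot when fn returns
	return fn(ctx)
}
```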
func (m *minerAPI) Start(_ context.Context) error {
return m.readyMgr.FireReady(nil)
}

func (m *minerAPI) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error) {
err := m.readyMgr.AwaitReady()
if err != nil {
return false, xerrors.Errorf("failed while waiting for accessor to start: %w", err)
}

var pieceInfo piecestore.PieceInfo
err = m.throttle.Do(ctx, func(ctx context.Context) (err error) {
pieceInfo, err = m.pieceStore.GetPieceInfo(pieceCid)
return err
})

if err != nil {
return false, xerrors.Errorf("failed to fetch pieceInfo for piece %s: %w", pieceCid, err)
}

if len(pieceInfo.Deals) == 0 {
return false, xerrors.Errorf("no storage deals found for piece %s", pieceCid)
}

// check if we have an unsealed deal for the given piece in any of the unsealed sectors.
for _, deal := range pieceInfo.Deals {
deal := deal

var isUnsealed bool
// Throttle this path to avoid flooding the storage subsystem.
err := m.throttle.Do(ctx, func(ctx context.Context) (err error) {
isUnsealed, err = m.sa.IsUnsealed(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded())
if err != nil {
return fmt.Errorf("failed to check if sector %d for deal %d was unsealed: %w", deal.SectorID, deal.DealID, err)
}
return nil
})

if err != nil {
log.Warnf("failed to check/retrieve unsealed sector: %s", err)
continue // move on to the next match.
}

if isUnsealed {
return true, nil
}
}

// we don't have an unsealed sector containing the piece
return false, nil
}

func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (mount.Reader, error) {
err := m.readyMgr.AwaitReady()
if err != nil {
return nil, err
}

// Throttle this path to avoid flooding the storage subsystem.
var pieceInfo piecestore.PieceInfo
err = m.throttle.Do(ctx, func(ctx context.Context) (err error) {
pieceInfo, err = m.pieceStore.GetPieceInfo(pieceCid)
return err
})

if err != nil {
return nil, xerrors.Errorf("failed to fetch pieceInfo for piece %s: %w", pieceCid, err)
}

if len(pieceInfo.Deals) == 0 {
return nil, xerrors.Errorf("no storage deals found for piece %s", pieceCid)
}

// prefer an unsealed sector containing the piece if one exists
for _, deal := range pieceInfo.Deals {
deal := deal

// Throttle this path to avoid flooding the storage subsystem.
var reader mount.Reader
err := m.throttle.Do(ctx, func(ctx context.Context) (err error) {
isUnsealed, err := m.sa.IsUnsealed(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded())
if err != nil {
return fmt.Errorf("failed to check if sector %d for deal %d was unsealed: %w", deal.SectorID, deal.DealID, err)
}
if !isUnsealed {
return nil
}
// Because we know we have an unsealed copy, this UnsealSector call will actually not perform any unsealing.
reader, err = m.sa.UnsealSectorAt(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded())
return err
})

if err != nil {
log.Warnf("failed to check/retrieve unsealed sector: %s", err)
continue // move on to the next match.
}

if reader != nil {
// we were able to obtain a reader for an already unsealed piece
return reader, nil
}
}

lastErr := xerrors.New("no sectors found to unseal from")

// if there is no unsealed sector containing the piece, just read the piece from the first sector we are able to unseal.
for _, deal := range pieceInfo.Deals {
// Note that if the deal data is not already unsealed, unsealing may
// block for a long time with the current PoRep
var reader mount.Reader
deal := deal
err := m.throttle.Do(ctx, func(ctx context.Context) (err error) {
// There is no unsealed copy at this point, so this UnsealSectorAt call may perform an expensive unseal.
reader, err = m.sa.UnsealSectorAt(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded())
return err
})

if err != nil {
lastErr = xerrors.Errorf("failed to unseal deal %d: %w", deal.DealID, err)
log.Warn(lastErr.Error())
continue
}

// Successfully fetched the deal data so return a reader over the data
return reader, nil
}

return nil, lastErr
}

func (m *minerAPI) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error) {
err := m.readyMgr.AwaitReady()
if err != nil {
return 0, err
}

pieceInfo, err := m.pieceStore.GetPieceInfo(pieceCid)
if err != nil {
return 0, xerrors.Errorf("failed to fetch pieceInfo for piece %s: %w", pieceCid, err)
}

if len(pieceInfo.Deals) == 0 {
return 0, xerrors.Errorf("no storage deals found for piece %s", pieceCid)
}

return uint64(pieceInfo.Deals[0].Length), nil
}
@ -1,252 +0,0 @@
// stm: #unit
package dagstore

import (
"bytes"
"context"
"io"
"sync"
"sync/atomic"
"testing"
"time"

"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
ds_sync "github.com/ipfs/go-datastore/sync"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"

"github.com/filecoin-project/dagstore/mount"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/piecestore"
piecestoreimpl "github.com/filecoin-project/go-fil-markets/piecestore/impl"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/shared"
"github.com/filecoin-project/go-state-types/abi"
paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych"
)

const unsealedSectorID = abi.SectorNumber(1)
const sealedSectorID = abi.SectorNumber(2)

func TestLotusAccessorFetchUnsealedPiece(t *testing.T) {
ctx := context.Background()

cid1, err := cid.Parse("bafkqaaa")
require.NoError(t, err)

unsealedSectorData := "unsealed"
sealedSectorData := "sealed"
mockData := map[abi.SectorNumber]string{
unsealedSectorID: unsealedSectorData,
sealedSectorID: sealedSectorData,
}

testCases := []struct {
name string
deals []abi.SectorNumber
fetchedData string
isUnsealed bool

expectErr bool
}{{
// Expect error if there is no deal info for piece CID
name: "no deals",
expectErr: true,
}, {
// Expect the API to always fetch the unsealed deal (because it's
// cheaper than fetching the sealed deal)
name: "prefer unsealed deal",
deals: []abi.SectorNumber{unsealedSectorID, sealedSectorID},
fetchedData: unsealedSectorData,
isUnsealed: true,
}, {
// Expect the API to unseal the data if there are no unsealed deals
name: "unseal if necessary",
deals: []abi.SectorNumber{sealedSectorID},
fetchedData: sealedSectorData,
isUnsealed: false,
}}

for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
ps := getPieceStore(t)
rpn := &mockRPN{
sectors: mockData,
}
api := NewMinerAPI(ps, rpn, 100, 5)
require.NoError(t, api.Start(ctx))

// Add deals to piece store
for _, sectorID := range tc.deals {
dealInfo := piecestore.DealInfo{
SectorID: sectorID,
}
err = ps.AddDealForPiece(cid1, cid.Undef, dealInfo)
require.NoError(t, err)
}

// Fetch the piece
//stm: @MARKET_DAGSTORE_FETCH_UNSEALED_PIECE_001
r, err := api.FetchUnsealedPiece(ctx, cid1)
if tc.expectErr {
require.Error(t, err)
return
}

// Check that the returned reader is for the correct piece
require.NoError(t, err)
bz, err := io.ReadAll(r)
require.NoError(t, err)

require.Equal(t, tc.fetchedData, string(bz))

//stm: @MARKET_DAGSTORE_IS_PIECE_UNSEALED_001
uns, err := api.IsUnsealed(ctx, cid1)
require.NoError(t, err)
require.Equal(t, tc.isUnsealed, uns)
})
}
}

func TestLotusAccessorGetUnpaddedCARSize(t *testing.T) {
ctx := context.Background()
cid1, err := cid.Parse("bafkqaaa")
require.NoError(t, err)

ps := getPieceStore(t)
rpn := &mockRPN{}
api := NewMinerAPI(ps, rpn, 100, 5)
require.NoError(t, api.Start(ctx))

// Add a deal with data Length 10
dealInfo := piecestore.DealInfo{
Length: 10,
}
err = ps.AddDealForPiece(cid1, cid.Undef, dealInfo)
require.NoError(t, err)

// Check that the data length is correct
//stm: @MARKET_DAGSTORE_GET_UNPADDED_CAR_SIZE_001
l, err := api.GetUnpaddedCARSize(ctx, cid1)
require.NoError(t, err)
require.EqualValues(t, 10, l)
}

func TestThrottle(t *testing.T) {
ctx := context.Background()
cid1, err := cid.Parse("bafkqaaa")
require.NoError(t, err)

ps := getPieceStore(t)
rpn := &mockRPN{
sectors: map[abi.SectorNumber]string{
unsealedSectorID: "foo",
},
}
api := NewMinerAPI(ps, rpn, 3, 5)
require.NoError(t, api.Start(ctx))

// Add a deal with data Length 10
dealInfo := piecestore.DealInfo{
SectorID: unsealedSectorID,
Length: 10,
}
err = ps.AddDealForPiece(cid1, cid.Undef, dealInfo)
require.NoError(t, err)

// hold the lock to block.
rpn.lk.Lock()

// fetch the piece concurrently.
errgrp, ctx := errgroup.WithContext(context.Background())
for i := 0; i < 10; i++ {
errgrp.Go(func() error {
//stm: @MARKET_DAGSTORE_FETCH_UNSEALED_PIECE_001
r, err := api.FetchUnsealedPiece(ctx, cid1)
if err == nil {
_ = r.Close()
}
return err
})
}

time.Sleep(500 * time.Millisecond)
require.EqualValues(t, 3, atomic.LoadInt32(&rpn.calls)) // throttled

// allow to proceed.
rpn.lk.Unlock()

// allow all to finish.
err = errgrp.Wait()
require.NoError(t, err)

require.EqualValues(t, 10, atomic.LoadInt32(&rpn.calls)) // throttled

}

func getPieceStore(t *testing.T) piecestore.PieceStore {
ps, err := piecestoreimpl.NewPieceStore(ds_sync.MutexWrap(ds.NewMapDatastore()))
require.NoError(t, err)

ch := make(chan struct{}, 1)
ps.OnReady(func(_ error) {
ch <- struct{}{}
})

err = ps.Start(context.Background())
require.NoError(t, err)
<-ch
return ps
}

type mockRPN struct {
calls int32 // guarded by atomic
lk sync.RWMutex // lock to simulate blocks.
sectors map[abi.SectorNumber]string
}

func (m *mockRPN) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) {
return m.UnsealSectorAt(ctx, sectorID, offset, length)
}

func (m *mockRPN) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) {
atomic.AddInt32(&m.calls, 1)
m.lk.RLock()
defer m.lk.RUnlock()

data, ok := m.sectors[sectorID]
if !ok {
panic("sector not found")
}
return struct {
io.ReadCloser
io.ReaderAt
io.Seeker
}{
ReadCloser: io.NopCloser(bytes.NewBuffer([]byte(data[:]))),
}, nil
}

func (m *mockRPN) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) {
return sectorID == unsealedSectorID, nil
}

func (m *mockRPN) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) {
panic("implement me")
|
||||
}
|
||||
|
||||
func (m *mockRPN) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (m *mockRPN) SavePaymentVoucher(ctx context.Context, paymentChannel address.Address, voucher *paychtypes.SignedVoucher, proof []byte, expectedAmount abi.TokenAmount, tok shared.TipSetToken) (abi.TokenAmount, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (m *mockRPN) GetRetrievalPricingInput(ctx context.Context, pieceCID cid.Cid, storageDeals []abi.DealID) (retrievalmarket.PricingInput, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
var _ retrievalmarket.RetrievalProviderNode = (*mockRPN)(nil)
|
||||
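Note: TestThrottle above exercises a counting-semaphore throttle around unseal fetches (the `3` in NewMinerAPI(ps, rpn, 3, 5) is the concurrency cap). The sketch below is a minimal, illustrative version of that pattern only; the names (throttle, newThrottle) are invented and do not reflect the removed implementation.

package main

import (
    "context"
    "fmt"
    "sync"
)

// throttle is a counting semaphore: at most cap(ch) callers can hold it at
// once; the rest block in Acquire until a slot is released.
type throttle struct{ ch chan struct{} }

func newThrottle(n int) *throttle { return &throttle{ch: make(chan struct{}, n)} }

func (t *throttle) Acquire(ctx context.Context) error {
    select {
    case t.ch <- struct{}{}:
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}

func (t *throttle) Release() { <-t.ch }

func main() {
    th := newThrottle(3) // mirrors the cap of 3 asserted by the test
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            if err := th.Acquire(context.Background()); err != nil {
                return
            }
            defer th.Release()
            fmt.Println("fetching piece", i) // at most 3 of these run concurrently
        }(i)
    }
    wg.Wait()
}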
@ -1,97 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/filecoin-project/lotus/markets/dagstore (interfaces: MinerAPI)

// Package mock_dagstore is a generated GoMock package.
package mock_dagstore

import (
    context "context"
    reflect "reflect"

    gomock "github.com/golang/mock/gomock"
    cid "github.com/ipfs/go-cid"

    mount "github.com/filecoin-project/dagstore/mount"
)

// MockMinerAPI is a mock of MinerAPI interface.
type MockMinerAPI struct {
    ctrl     *gomock.Controller
    recorder *MockMinerAPIMockRecorder
}

// MockMinerAPIMockRecorder is the mock recorder for MockMinerAPI.
type MockMinerAPIMockRecorder struct {
    mock *MockMinerAPI
}

// NewMockMinerAPI creates a new mock instance.
func NewMockMinerAPI(ctrl *gomock.Controller) *MockMinerAPI {
    mock := &MockMinerAPI{ctrl: ctrl}
    mock.recorder = &MockMinerAPIMockRecorder{mock}
    return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockMinerAPI) EXPECT() *MockMinerAPIMockRecorder {
    return m.recorder
}

// FetchUnsealedPiece mocks base method.
func (m *MockMinerAPI) FetchUnsealedPiece(arg0 context.Context, arg1 cid.Cid) (mount.Reader, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "FetchUnsealedPiece", arg0, arg1)
    ret0, _ := ret[0].(mount.Reader)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// FetchUnsealedPiece indicates an expected call of FetchUnsealedPiece.
func (mr *MockMinerAPIMockRecorder) FetchUnsealedPiece(arg0, arg1 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchUnsealedPiece", reflect.TypeOf((*MockMinerAPI)(nil).FetchUnsealedPiece), arg0, arg1)
}

// GetUnpaddedCARSize mocks base method.
func (m *MockMinerAPI) GetUnpaddedCARSize(arg0 context.Context, arg1 cid.Cid) (uint64, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "GetUnpaddedCARSize", arg0, arg1)
    ret0, _ := ret[0].(uint64)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// GetUnpaddedCARSize indicates an expected call of GetUnpaddedCARSize.
func (mr *MockMinerAPIMockRecorder) GetUnpaddedCARSize(arg0, arg1 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnpaddedCARSize", reflect.TypeOf((*MockMinerAPI)(nil).GetUnpaddedCARSize), arg0, arg1)
}

// IsUnsealed mocks base method.
func (m *MockMinerAPI) IsUnsealed(arg0 context.Context, arg1 cid.Cid) (bool, error) {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "IsUnsealed", arg0, arg1)
    ret0, _ := ret[0].(bool)
    ret1, _ := ret[1].(error)
    return ret0, ret1
}

// IsUnsealed indicates an expected call of IsUnsealed.
func (mr *MockMinerAPIMockRecorder) IsUnsealed(arg0, arg1 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnsealed", reflect.TypeOf((*MockMinerAPI)(nil).IsUnsealed), arg0, arg1)
}

// Start mocks base method.
func (m *MockMinerAPI) Start(arg0 context.Context) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "Start", arg0)
    ret0, _ := ret[0].(error)
    return ret0
}

// Start indicates an expected call of Start.
func (mr *MockMinerAPIMockRecorder) Start(arg0 interface{}) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockMinerAPI)(nil).Start), arg0)
}
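Note: the generated mock above is driven with gomock in the usual way: create a controller, program expectations via EXPECT(), and let Finish() assert they were all met. A minimal illustrative test (package paths as in the removed code; everything else is invented for the sketch):

package dagstore_test

import (
    "context"
    "testing"

    "github.com/golang/mock/gomock"
    "github.com/ipfs/go-cid"
    "github.com/stretchr/testify/require"

    mock_dagstore "github.com/filecoin-project/lotus/markets/dagstore/mocks"
)

func TestMinerAPIMock(t *testing.T) {
    ctrl := gomock.NewController(t)
    defer ctrl.Finish() // asserts every EXPECT() below was satisfied

    m := mock_dagstore.NewMockMinerAPI(ctrl)
    c, err := cid.Parse("bafkqaaa")
    require.NoError(t, err)

    // Program the expected call and its canned return values.
    m.EXPECT().IsUnsealed(gomock.Any(), c).Return(true, nil).Times(1)

    unsealed, err := m.IsUnsealed(context.Background(), c)
    require.NoError(t, err)
    require.True(t, unsealed)
}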
@ -1,91 +0,0 @@
package dagstore

import (
    "context"
    "net/url"

    "github.com/ipfs/go-cid"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/dagstore/mount"
)

const lotusScheme = "lotus"

var _ mount.Mount = (*LotusMount)(nil)

// mountTemplate returns a templated LotusMount containing the supplied API.
//
// It is called when registering a mount type with the mount registry
// of the DAG store. It is used to reinstantiate mounts after a restart.
//
// When the registry needs to deserialize a mount it clones the template then
// calls Deserialize on the cloned instance, which will have a reference to the
// lotus mount API supplied here.
func mountTemplate(api MinerAPI) *LotusMount {
    return &LotusMount{API: api}
}

// LotusMount is a DAGStore mount implementation that fetches deal data
// from a PieceCID.
type LotusMount struct {
    API      MinerAPI
    PieceCid cid.Cid
}

func NewLotusMount(pieceCid cid.Cid, api MinerAPI) (*LotusMount, error) {
    return &LotusMount{
        PieceCid: pieceCid,
        API:      api,
    }, nil
}

func (l *LotusMount) Serialize() *url.URL {
    return &url.URL{
        Host: l.PieceCid.String(),
    }
}

func (l *LotusMount) Deserialize(u *url.URL) error {
    pieceCid, err := cid.Decode(u.Host)
    if err != nil {
        return xerrors.Errorf("failed to parse PieceCid from host '%s': %w", u.Host, err)
    }
    l.PieceCid = pieceCid
    return nil
}

func (l *LotusMount) Fetch(ctx context.Context) (mount.Reader, error) {
    return l.API.FetchUnsealedPiece(ctx, l.PieceCid)
}

func (l *LotusMount) Info() mount.Info {
    return mount.Info{
        Kind:             mount.KindRemote,
        AccessSequential: true,
        AccessSeek:       true,
        AccessRandom:     true,
    }
}

func (l *LotusMount) Close() error {
    return nil
}

func (l *LotusMount) Stat(ctx context.Context) (mount.Stat, error) {
    size, err := l.API.GetUnpaddedCARSize(ctx, l.PieceCid)
    if err != nil {
        return mount.Stat{}, xerrors.Errorf("failed to fetch piece size for piece %s: %w", l.PieceCid, err)
    }
    isUnsealed, err := l.API.IsUnsealed(ctx, l.PieceCid)
    if err != nil {
        return mount.Stat{}, xerrors.Errorf("failed to verify if we have the unsealed piece %s: %w", l.PieceCid, err)
    }

    // TODO Mark false when storage deal expires.
    return mount.Stat{
        Exists: true,
        Size:   int64(size),
        Ready:  isUnsealed,
    }, nil
}
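Note: the Serialize/Deserialize pair above is what lets the DAG store persist each mount as a lotus://<piece-cid> URL and rebuild it after a restart via the template mechanism described in the mountTemplate comment. A compact sketch of the round trip, written as a fragment inside this package (the function name is illustrative; the tests that follow exercise the same path):

func exampleRoundTrip(api MinerAPI, pieceCid cid.Cid) (*LotusMount, error) {
    mnt, err := NewLotusMount(pieceCid, api)
    if err != nil {
        return nil, err
    }

    u := mnt.Serialize() // url.URL{Host: pieceCid.String()}

    // On restart the registry clones the template and deserializes into it,
    // which restores PieceCid while reusing the injected API.
    clone := mountTemplate(api)
    if err := clone.Deserialize(u); err != nil {
        return nil, err
    }
    return clone, nil // equivalent to the original mount
}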
@ -1,151 +0,0 @@
// stm: @unit
package dagstore

import (
    "context"
    "io"
    "net/url"
    "strings"
    "testing"

    "github.com/golang/mock/gomock"
    blocksutil "github.com/ipfs/go-ipfs-blocksutil"
    "github.com/stretchr/testify/require"

    "github.com/filecoin-project/dagstore/mount"

    mock_dagstore "github.com/filecoin-project/lotus/markets/dagstore/mocks"
)

func TestLotusMount(t *testing.T) {
    //stm: @MARKET_DAGSTORE_FETCH_UNSEALED_PIECE_001, @MARKET_DAGSTORE_GET_UNPADDED_CAR_SIZE_001
    //stm: @MARKET_DAGSTORE_IS_PIECE_UNSEALED_001
    ctx := context.Background()
    bgen := blocksutil.NewBlockGenerator()
    cid := bgen.Next().Cid()

    mockCtrl := gomock.NewController(t)
    // when test is done, assert expectations on all mock objects.
    defer mockCtrl.Finish()

    // create a mock lotus api that returns the reader we want
    mockLotusMountAPI := mock_dagstore.NewMockMinerAPI(mockCtrl)

    mockLotusMountAPI.EXPECT().IsUnsealed(gomock.Any(), cid).Return(true, nil).Times(1)

    mr1 := struct {
        io.ReadCloser
        io.ReaderAt
        io.Seeker
    }{
        ReadCloser: io.NopCloser(strings.NewReader("testing")),
        ReaderAt:   nil,
        Seeker:     nil,
    }
    mr2 := struct {
        io.ReadCloser
        io.ReaderAt
        io.Seeker
    }{
        ReadCloser: io.NopCloser(strings.NewReader("testing")),
        ReaderAt:   nil,
        Seeker:     nil,
    }

    mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(mr1, nil).Times(1)
    mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(mr2, nil).Times(1)
    mockLotusMountAPI.EXPECT().GetUnpaddedCARSize(ctx, cid).Return(uint64(100), nil).Times(1)

    mnt, err := NewLotusMount(cid, mockLotusMountAPI)
    require.NoError(t, err)
    info := mnt.Info()
    require.Equal(t, info.Kind, mount.KindRemote)

    // fetch and assert success
    rd, err := mnt.Fetch(context.Background())
    require.NoError(t, err)

    bz, err := io.ReadAll(rd)
    require.NoError(t, err)
    require.NoError(t, rd.Close())
    require.Equal(t, []byte("testing"), bz)

    stat, err := mnt.Stat(ctx)
    require.NoError(t, err)
    require.EqualValues(t, 100, stat.Size)

    // serialize url then deserialize from mount template -> should get back
    // the same mount
    url := mnt.Serialize()
    mnt2 := mountTemplate(mockLotusMountAPI)
    err = mnt2.Deserialize(url)
    require.NoError(t, err)

    // fetching on this mount should get us back the same data.
    rd, err = mnt2.Fetch(context.Background())
    require.NoError(t, err)
    bz, err = io.ReadAll(rd)
    require.NoError(t, err)
    require.NoError(t, rd.Close())
    require.Equal(t, []byte("testing"), bz)
}

func TestLotusMountDeserialize(t *testing.T) {
    //stm: @MARKET_DAGSTORE_DESERIALIZE_CID_001
    api := &minerAPI{}

    bgen := blocksutil.NewBlockGenerator()
    cid := bgen.Next().Cid()

    // success
    us := lotusScheme + "://" + cid.String()
    u, err := url.Parse(us)
    require.NoError(t, err)

    mnt := mountTemplate(api)
    err = mnt.Deserialize(u)
    require.NoError(t, err)

    require.Equal(t, cid, mnt.PieceCid)
    require.Equal(t, api, mnt.API)

    // fails if cid is not valid
    us = lotusScheme + "://" + "rand"
    u, err = url.Parse(us)
    require.NoError(t, err)
    err = mnt.Deserialize(u)
    require.Error(t, err)
    require.Contains(t, err.Error(), "failed to parse PieceCid")
}

func TestLotusMountRegistration(t *testing.T) {
    //stm: @MARKET_DAGSTORE_FETCH_UNSEALED_PIECE_001, @MARKET_DAGSTORE_GET_UNPADDED_CAR_SIZE_001
    //stm: @MARKET_DAGSTORE_IS_PIECE_UNSEALED_001
    ctx := context.Background()
    bgen := blocksutil.NewBlockGenerator()
    cid := bgen.Next().Cid()

    // success
    us := lotusScheme + "://" + cid.String()
    u, err := url.Parse(us)
    require.NoError(t, err)

    mockCtrl := gomock.NewController(t)
    // when test is done, assert expectations on all mock objects.
    defer mockCtrl.Finish()

    mockLotusMountAPI := mock_dagstore.NewMockMinerAPI(mockCtrl)
    registry := mount.NewRegistry()
    err = registry.Register(lotusScheme, mountTemplate(mockLotusMountAPI))
    require.NoError(t, err)

    mnt, err := registry.Instantiate(u)
    require.NoError(t, err)

    mockLotusMountAPI.EXPECT().IsUnsealed(ctx, cid).Return(true, nil)
    mockLotusMountAPI.EXPECT().GetUnpaddedCARSize(ctx, cid).Return(uint64(100), nil).Times(1)
    stat, err := mnt.Stat(context.Background())
    require.NoError(t, err)
    require.EqualValues(t, 100, stat.Size)
    require.True(t, stat.Ready)
}
@ -1,436 +0,0 @@
package dagstore

import (
    "context"
    "errors"
    "fmt"
    "math"
    "os"
    "path/filepath"
    "sync"
    "time"

    "github.com/ipfs/go-cid"
    ds "github.com/ipfs/go-datastore"
    levelds "github.com/ipfs/go-ds-leveldb"
    measure "github.com/ipfs/go-ds-measure"
    logging "github.com/ipfs/go-log/v2"
    carindex "github.com/ipld/go-car/v2/index"
    "github.com/libp2p/go-libp2p/core/host"
    ldbopts "github.com/syndtr/goleveldb/leveldb/opt"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/dagstore"
    "github.com/filecoin-project/dagstore/index"
    "github.com/filecoin-project/dagstore/mount"
    "github.com/filecoin-project/dagstore/shard"
    "github.com/filecoin-project/go-fil-markets/storagemarket"
    "github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates"
    "github.com/filecoin-project/go-fil-markets/stores"
    "github.com/filecoin-project/go-statemachine/fsm"

    "github.com/filecoin-project/lotus/node/config"
)

const (
    maxRecoverAttempts = 1
    shardRegMarker     = ".shard-registration-complete"
)

var log = logging.Logger("dagstore")

type Wrapper struct {
    ctx          context.Context
    cancel       context.CancelFunc
    backgroundWg sync.WaitGroup

    cfg       config.DAGStoreConfig
    dagst     dagstore.Interface
    minerAPI  MinerAPI
    failureCh chan dagstore.ShardResult
    gcInterval time.Duration
}

var _ stores.DAGStoreWrapper = (*Wrapper)(nil)

func NewDAGStore(cfg config.DAGStoreConfig, minerApi MinerAPI, h host.Host) (*dagstore.DAGStore, *Wrapper, error) {
    // construct the DAG Store.
    registry := mount.NewRegistry()
    if err := registry.Register(lotusScheme, mountTemplate(minerApi)); err != nil {
        return nil, nil, xerrors.Errorf("failed to create registry: %w", err)
    }

    // The dagstore will write Shard failures to the `failureCh` here.
    failureCh := make(chan dagstore.ShardResult, 1)

    var (
        transientsDir = filepath.Join(cfg.RootDir, "transients")
        datastoreDir  = filepath.Join(cfg.RootDir, "datastore")
        indexDir      = filepath.Join(cfg.RootDir, "index")
    )

    dstore, err := newDatastore(datastoreDir)
    if err != nil {
        return nil, nil, xerrors.Errorf("failed to create dagstore datastore in %s: %w", datastoreDir, err)
    }

    irepo, err := index.NewFSRepo(indexDir)
    if err != nil {
        return nil, nil, xerrors.Errorf("failed to initialise dagstore index repo: %w", err)
    }

    topIndex := index.NewInverted(dstore)
    dcfg := dagstore.Config{
        TransientsDir: transientsDir,
        IndexRepo:     irepo,
        Datastore:     dstore,
        MountRegistry: registry,
        FailureCh:     failureCh,
        TopLevelIndex: topIndex,
        // not limiting fetches globally, as the Lotus mount does
        // conditional throttling.
        MaxConcurrentIndex:        cfg.MaxConcurrentIndex,
        MaxConcurrentReadyFetches: cfg.MaxConcurrentReadyFetches,
        RecoverOnStart:            dagstore.RecoverOnAcquire,
    }

    dagst, err := dagstore.NewDAGStore(dcfg)
    if err != nil {
        return nil, nil, xerrors.Errorf("failed to create DAG store: %w", err)
    }

    w := &Wrapper{
        cfg:        cfg,
        dagst:      dagst,
        minerAPI:   minerApi,
        failureCh:  failureCh,
        gcInterval: time.Duration(cfg.GCInterval),
    }

    return dagst, w, nil
}

// newDatastore creates a datastore under the given base directory
// for dagstore metadata.
func newDatastore(dir string) (ds.Batching, error) {
    // Create the datastore directory if it doesn't exist yet.
    if err := os.MkdirAll(dir, 0755); err != nil {
        return nil, xerrors.Errorf("failed to create directory %s for DAG store datastore: %w", dir, err)
    }

    // Create a new LevelDB datastore
    dstore, err := levelds.NewDatastore(dir, &levelds.Options{
        Compression: ldbopts.NoCompression,
        NoSync:      false,
        Strict:      ldbopts.StrictAll,
        ReadOnly:    false,
    })
    if err != nil {
        return nil, xerrors.Errorf("failed to open datastore for DAG store: %w", err)
    }
    // Keep statistics about the datastore
    mds := measure.New("measure.", dstore)
    return mds, nil
}

func (w *Wrapper) Start(ctx context.Context) error {
    w.ctx, w.cancel = context.WithCancel(ctx)

    // Run a goroutine to do DAG store GC.
    w.backgroundWg.Add(1)
    go w.gcLoop()

    // Run a goroutine for shard recovery
    if dss, ok := w.dagst.(*dagstore.DAGStore); ok {
        w.backgroundWg.Add(1)
        go dagstore.RecoverImmediately(w.ctx, dss, w.failureCh, maxRecoverAttempts, w.backgroundWg.Done)
    }

    return w.dagst.Start(ctx)
}

func (w *Wrapper) gcLoop() {
    defer w.backgroundWg.Done()

    ticker := time.NewTicker(w.gcInterval)
    defer ticker.Stop()

    for w.ctx.Err() == nil {
        select {
        // GC the DAG store on every tick
        case <-ticker.C:
            _, _ = w.dagst.GC(w.ctx)

        // Exit when the DAG store wrapper is shutdown
        case <-w.ctx.Done():
            return
        }
    }
}

func (w *Wrapper) LoadShard(ctx context.Context, pieceCid cid.Cid) (stores.ClosableBlockstore, error) {
    log.Debugf("acquiring shard for piece CID %s", pieceCid)

    key := shard.KeyFromCID(pieceCid)
    resch := make(chan dagstore.ShardResult, 1)
    err := w.dagst.AcquireShard(ctx, key, resch, dagstore.AcquireOpts{})
    log.Debugf("sent message to acquire shard for piece CID %s", pieceCid)

    if err != nil {
        if !errors.Is(err, dagstore.ErrShardUnknown) {
            return nil, xerrors.Errorf("failed to schedule acquire shard for piece CID %s: %w", pieceCid, err)
        }

        // if the DAGStore does not know about the Shard -> register it and then try to acquire it again.
        log.Warnw("failed to load shard as shard is not registered, will re-register", "pieceCID", pieceCid)
        // The path of a transient file that we can ask the DAG Store to use
        // to perform the Indexing rather than fetching it via the Mount if
        // we already have a transient file. However, we don't have it here
        // and therefore we pass an empty file path.
        carPath := ""
        if err := stores.RegisterShardSync(ctx, w, pieceCid, carPath, false); err != nil {
            return nil, xerrors.Errorf("failed to re-register shard during loading piece CID %s: %w", pieceCid, err)
        }
        log.Warnw("successfully re-registered shard", "pieceCID", pieceCid)

        resch = make(chan dagstore.ShardResult, 1)
        if err := w.dagst.AcquireShard(ctx, key, resch, dagstore.AcquireOpts{}); err != nil {
            return nil, xerrors.Errorf("failed to acquire Shard for piece CID %s after re-registering: %w", pieceCid, err)
        }
    }

    // TODO: The context is not yet being actively monitored by the DAG store,
    // so we need to select against ctx.Done() until the following issue is
    // implemented:
    // https://github.com/filecoin-project/dagstore/issues/39
    var res dagstore.ShardResult
    select {
    case <-ctx.Done():
        return nil, ctx.Err()
    case res = <-resch:
        if res.Error != nil {
            return nil, xerrors.Errorf("failed to acquire shard for piece CID %s: %w", pieceCid, res.Error)
        }
    }

    bs, err := res.Accessor.Blockstore()
    if err != nil {
        return nil, err
    }

    log.Debugf("successfully loaded blockstore for piece CID %s", pieceCid)
    return &Blockstore{ReadBlockstore: bs, Closer: res.Accessor}, nil
}

func (w *Wrapper) RegisterShard(ctx context.Context, pieceCid cid.Cid, carPath string, eagerInit bool, resch chan dagstore.ShardResult) error {
    // Create a lotus mount with the piece CID
    key := shard.KeyFromCID(pieceCid)
    mt, err := NewLotusMount(pieceCid, w.minerAPI)
    if err != nil {
        return xerrors.Errorf("failed to create lotus mount for piece CID %s: %w", pieceCid, err)
    }

    // Register the shard
    opts := dagstore.RegisterOpts{
        ExistingTransient:  carPath,
        LazyInitialization: !eagerInit,
    }
    err = w.dagst.RegisterShard(ctx, key, mt, resch, opts)
    if err != nil {
        return xerrors.Errorf("failed to schedule register shard for piece CID %s: %w", pieceCid, err)
    }
    log.Debugf("successfully submitted Register Shard request for piece CID %s with eagerInit=%t", pieceCid, eagerInit)

    return nil
}

func (w *Wrapper) DestroyShard(ctx context.Context, pieceCid cid.Cid, resch chan dagstore.ShardResult) error {
    key := shard.KeyFromCID(pieceCid)

    opts := dagstore.DestroyOpts{}

    err := w.dagst.DestroyShard(ctx, key, resch, opts)

    if err != nil {
        return xerrors.Errorf("failed to schedule destroy shard for piece CID %s: %w", pieceCid, err)
    }
    log.Debugf("successfully submitted destroy Shard request for piece CID %s", pieceCid)

    return nil
}

func (w *Wrapper) MigrateDeals(ctx context.Context, deals []storagemarket.MinerDeal) (bool, error) {
    log := log.Named("migrator")

    // Check if all deals have already been registered as shards
    isComplete, err := w.registrationComplete()
    if err != nil {
        return false, xerrors.Errorf("failed to get dagstore migration status: %w", err)
    }
    if isComplete {
        // All deals have been registered as shards, bail out
        log.Info("no shard migration necessary; already marked complete")
        return false, nil
    }

    log.Infow("registering shards for all active deals in sealing subsystem", "count", len(deals))

    inSealingSubsystem := make(map[fsm.StateKey]struct{}, len(providerstates.StatesKnownBySealingSubsystem))
    for _, s := range providerstates.StatesKnownBySealingSubsystem {
        inSealingSubsystem[s] = struct{}{}
    }

    // channel where results will be received, and channel where the total
    // number of registered shards will be sent.
    resch := make(chan dagstore.ShardResult, 32)
    totalCh := make(chan int)
    doneCh := make(chan struct{})

    // Start making progress consuming results. We won't know how many to
    // actually consume until we register all shards.
    //
    // If there are any problems registering shards, just log an error
    go func() {
        defer close(doneCh)

        var total = math.MaxInt64
        var res dagstore.ShardResult
        for rcvd := 0; rcvd < total; {
            select {
            case total = <-totalCh:
                // we now know the total number of registered shards;
                // close and nil the channel so we no longer consume from it.
                close(totalCh)
                totalCh = nil
            case res = <-resch:
                rcvd++
                if res.Error == nil {
                    log.Infow("async shard registration completed successfully", "shard_key", res.Key)
                } else {
                    log.Warnw("async shard registration failed", "shard_key", res.Key, "error", res.Error)
                }
            }
        }
    }()

    // Filter for deals that are handed off.
    //
    // If the deal has not yet been handed off to the sealing subsystem, we
    // don't need to call RegisterShard in this migration; RegisterShard will
    // be called in the new code once the deal reaches the state where it's
    // handed off to the sealing subsystem.
    var registered int
    for _, deal := range deals {
        pieceCid := deal.Proposal.PieceCID

        // enrich log statements in this iteration with deal ID and piece CID.
        log := log.With("deal_id", deal.DealID, "piece_cid", pieceCid)

        // Filter for deals that have been handed off to the sealing subsystem
        if _, ok := inSealingSubsystem[deal.State]; !ok {
            log.Infow("deal not ready; skipping")
            continue
        }

        log.Infow("registering deal in dagstore with lazy init")

        // Register the deal as a shard with the DAG store with lazy initialization.
        // The index will be populated the first time the deal is retrieved, or
        // through the bulk initialization script.
        err = w.RegisterShard(ctx, pieceCid, "", false, resch)
        if err != nil {
            log.Warnw("failed to register shard", "error", err)
            continue
        }
        registered++
    }

    log.Infow("finished registering all shards", "total", registered)
    totalCh <- registered
    <-doneCh

    log.Infow("confirmed registration of all shards")

    // Completed registering all shards, so mark the migration as complete
    err = w.markRegistrationComplete()
    if err != nil {
        log.Errorf("failed to mark shards as registered: %s", err)
    } else {
        log.Info("successfully marked migration as complete")
    }

    log.Infow("dagstore migration complete")

    return true, nil
}

// Check for the existence of a "marker" file indicating that the migration
// has completed
func (w *Wrapper) registrationComplete() (bool, error) {
    path := filepath.Join(w.cfg.RootDir, shardRegMarker)
    _, err := os.Stat(path)
    if os.IsNotExist(err) {
        return false, nil
    }
    if err != nil {
        return false, err
    }
    return true, nil
}

// Create a "marker" file indicating that the migration has completed
func (w *Wrapper) markRegistrationComplete() error {
    path := filepath.Join(w.cfg.RootDir, shardRegMarker)
    file, err := os.Create(path)
    if err != nil {
        return err
    }
    return file.Close()
}

// Get all the pieces that contain a block
func (w *Wrapper) GetPiecesContainingBlock(blockCID cid.Cid) ([]cid.Cid, error) {
    // Pieces are stored as "shards" in the DAG store
    shardKeys, err := w.dagst.ShardsContainingMultihash(w.ctx, blockCID.Hash())
    if err != nil {
        return nil, xerrors.Errorf("getting pieces containing block %s: %w", blockCID, err)
    }

    // Convert from shard key to cid
    pieceCids := make([]cid.Cid, 0, len(shardKeys))
    for _, k := range shardKeys {
        c, err := cid.Parse(k.String())
        if err != nil {
            prefix := fmt.Sprintf("getting pieces containing block %s:", blockCID)
            return nil, xerrors.Errorf("%s converting shard key %s to piece cid: %w", prefix, k, err)
        }

        pieceCids = append(pieceCids, c)
    }

    return pieceCids, nil
}

func (w *Wrapper) GetIterableIndexForPiece(pieceCid cid.Cid) (carindex.IterableIndex, error) {
    return w.dagst.GetIterableIndex(shard.KeyFromCID(pieceCid))
}

func (w *Wrapper) Close() error {
    // Cancel the context
    w.cancel()

    // Close the DAG store
    log.Info("will close the dagstore")
    if err := w.dagst.Close(); err != nil {
        return xerrors.Errorf("failed to close dagstore: %w", err)
    }
    log.Info("dagstore closed")

    // Wait for the background goroutines to exit
    log.Info("waiting for dagstore background wrapper goroutines to exit")
    w.backgroundWg.Wait()
    log.Info("exited dagstore background wrapper goroutines")

    return nil
}
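Note: the intended lifecycle of the wrapper above is construct, Start, LoadShard, Close. A minimal caller sketch, as a fragment inside this package and using only the APIs defined in the file (the function name and error handling are illustrative):

func exampleLifecycle(ctx context.Context, cfg config.DAGStoreConfig, mapi MinerAPI, h host.Host, piece cid.Cid) error {
    dagst, w, err := NewDAGStore(cfg, mapi, h)
    if err != nil {
        return err
    }
    _ = dagst // the raw DAG store is also returned for direct inspection

    // Start kicks off the GC loop and the shard-recovery goroutine.
    if err := w.Start(ctx); err != nil {
        return err
    }
    defer w.Close() //nolint:errcheck

    // LoadShard re-registers the shard on demand if the store doesn't know it.
    bs, err := w.LoadShard(ctx, piece)
    if err != nil {
        return err
    }
    defer bs.Close() //nolint:errcheck

    // ... read the piece's blocks from bs ...
    return nil
}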
@ -1,153 +0,0 @@
// stm: #integration
package dagstore

import (
    "context"
    "io"
    "testing"

    mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
    "github.com/stretchr/testify/require"

    "github.com/filecoin-project/dagstore"
    "github.com/filecoin-project/dagstore/mount"
    "github.com/filecoin-project/go-fil-markets/retrievalmarket"
    "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes"
    tut "github.com/filecoin-project/go-fil-markets/shared_testutil"
    "github.com/filecoin-project/go-fil-markets/storagemarket"
    "github.com/filecoin-project/go-state-types/abi"
    markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"

    "github.com/filecoin-project/lotus/node/config"
)

func TestShardRegistration(t *testing.T) {
    ps := tut.NewTestPieceStore()
    sa := testnodes.NewTestSectorAccessor()

    ctx := context.Background()
    cids := tut.GenerateCids(4)
    pieceCidUnsealed := cids[0]
    pieceCidSealed := cids[1]
    pieceCidUnsealed2 := cids[2]
    pieceCidUnsealed3 := cids[3]

    sealedSector := abi.SectorNumber(1)
    unsealedSector1 := abi.SectorNumber(2)
    unsealedSector2 := abi.SectorNumber(3)
    unsealedSector3 := abi.SectorNumber(4)

    // ps.ExpectPiece(pieceCidUnsealed, piecestore.PieceInfo{
    //	PieceCID: pieceCidUnsealed,
    //	Deals: []piecestore.DealInfo{
    //		{
    //			SectorID: unsealedSector1,
    //		},
    //	},
    // })
    //
    // ps.ExpectPiece(pieceCidSealed, piecestore.PieceInfo{
    //	PieceCID: pieceCidSealed,
    //	Deals: []piecestore.DealInfo{
    //		{
    //			SectorID: sealedSector,
    //		},
    //	},
    // })

    deals := []storagemarket.MinerDeal{{
        // Should be registered
        //stm: @MARKET_DAGSTORE_MIGRATE_DEALS_001
        State:        storagemarket.StorageDealSealing,
        SectorNumber: unsealedSector1,
        ClientDealProposal: markettypes.ClientDealProposal{
            Proposal: markettypes.DealProposal{
                PieceCID: pieceCidUnsealed,
            },
        },
    }, {
        // Should be registered with lazy registration (because sector is sealed)
        State:        storagemarket.StorageDealSealing,
        SectorNumber: sealedSector,
        ClientDealProposal: markettypes.ClientDealProposal{
            Proposal: markettypes.DealProposal{
                PieceCID: pieceCidSealed,
            },
        },
    }, {
        // Should be ignored because deal is no longer active
        //stm: @MARKET_DAGSTORE_MIGRATE_DEALS_003
        State:        storagemarket.StorageDealError,
        SectorNumber: unsealedSector2,
        ClientDealProposal: markettypes.ClientDealProposal{
            Proposal: markettypes.DealProposal{
                PieceCID: pieceCidUnsealed2,
            },
        },
    }, {
        // Should be ignored because deal is not yet sealing
        State:        storagemarket.StorageDealFundsReserved,
        SectorNumber: unsealedSector3,
        ClientDealProposal: markettypes.ClientDealProposal{
            Proposal: markettypes.DealProposal{
                PieceCID: pieceCidUnsealed3,
            },
        },
    }}

    cfg := config.DefaultStorageMiner().DAGStore
    cfg.RootDir = t.TempDir()

    h, err := mocknet.New().GenPeer()
    require.NoError(t, err)

    mapi := NewMinerAPI(ps, &wrappedSA{sa}, 10, 5)
    dagst, w, err := NewDAGStore(cfg, mapi, h)
    require.NoError(t, err)
    require.NotNil(t, dagst)
    require.NotNil(t, w)

    err = dagst.Start(context.Background())
    require.NoError(t, err)

    migrated, err := w.MigrateDeals(ctx, deals)
    require.True(t, migrated)
    require.NoError(t, err)

    //stm: @MARKET_DAGSTORE_GET_ALL_SHARDS_001
    info := dagst.AllShardsInfo()
    require.Len(t, info, 2)
    for _, i := range info {
        require.Equal(t, dagstore.ShardStateNew, i.ShardState)
    }

    // Run register shard migration again
    //stm: @MARKET_DAGSTORE_MIGRATE_DEALS_002
    migrated, err = w.MigrateDeals(ctx, deals)
    require.False(t, migrated)
    require.NoError(t, err)

    // ps.VerifyExpectations(t)
}

type wrappedSA struct {
    retrievalmarket.SectorAccessor
}

func (w *wrappedSA) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) {
    r, err := w.UnsealSector(ctx, sectorID, pieceOffset, length)
    if err != nil {
        return nil, err
    }
    return struct {
        io.ReadCloser
        io.Seeker
        io.ReaderAt
    }{
        ReadCloser: r,
        Seeker:     nil,
        ReaderAt:   nil,
    }, nil
}

var _ SectorAccessor = &wrappedSA{}
@ -1,262 +0,0 @@
// stm: #unit
package dagstore

import (
    "bytes"
    "context"
    "os"
    "testing"
    "time"

    "github.com/ipfs/go-cid"
    carindex "github.com/ipld/go-car/v2/index"
    mocknet "github.com/libp2p/go-libp2p/p2p/net/mock"
    mh "github.com/multiformats/go-multihash"
    "github.com/stretchr/testify/require"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/dagstore"
    "github.com/filecoin-project/dagstore/mount"
    "github.com/filecoin-project/dagstore/shard"

    "github.com/filecoin-project/lotus/node/config"
)

// TestWrapperAcquireRecoveryDestroy verifies that if acquiring a shard returns
// a "not found" error, the wrapper registers the shard and then reacquires it,
// and that the shard can subsequently be destroyed.
func TestWrapperAcquireRecoveryDestroy(t *testing.T) {
    ctx := context.Background()
    pieceCid, err := cid.Parse("bafkqaaa")
    require.NoError(t, err)

    h, err := mocknet.New().GenPeer()
    require.NoError(t, err)
    // Create a DAG store wrapper
    dagst, w, err := NewDAGStore(config.DAGStoreConfig{
        RootDir:    t.TempDir(),
        GCInterval: config.Duration(1 * time.Millisecond),
    }, mockLotusMount{}, h)
    require.NoError(t, err)

    defer dagst.Close() //nolint:errcheck

    // Return an error from acquire shard the first time
    acquireShardErr := make(chan error, 1)
    acquireShardErr <- xerrors.Errorf("unknown shard: %w", dagstore.ErrShardUnknown)

    // Create a mock DAG store in place of the real DAG store
    mock := &mockDagStore{
        acquireShardErr: acquireShardErr,
        acquireShardRes: dagstore.ShardResult{
            Accessor: getShardAccessor(t),
        },
        register: make(chan shard.Key, 1),
        destroy:  make(chan shard.Key, 1),
    }
    w.dagst = mock

    //stm: @MARKET_DAGSTORE_ACQUIRE_SHARD_002
    mybs, err := w.LoadShard(ctx, pieceCid)
    require.NoError(t, err)

    // Expect the wrapper to try to recover from the error returned from
    // acquire shard by calling register shard with the same key
    tctx, cancel := context.WithTimeout(ctx, time.Second)
    defer cancel()
    select {
    case <-tctx.Done():
        require.Fail(t, "failed to call register")
    case k := <-mock.register:
        require.Equal(t, k.String(), pieceCid.String())
    }

    // Verify that we can get things from the acquired blockstore
    var count int
    ch, err := mybs.AllKeysChan(ctx)
    require.NoError(t, err)
    for range ch {
        count++
    }
    require.Greater(t, count, 0)

    // Destroy the shard
    dr := make(chan dagstore.ShardResult, 1)
    err = w.DestroyShard(ctx, pieceCid, dr)
    require.NoError(t, err)

    dctx, cancel := context.WithTimeout(ctx, time.Second)
    defer cancel()
    select {
    case <-dctx.Done():
        require.Fail(t, "failed to call destroy")
    case k := <-mock.destroy:
        require.Equal(t, k.String(), pieceCid.String())
    }

    var dcount int
    dch, err := mybs.AllKeysChan(ctx)
    require.NoError(t, err)
    for range dch {
        dcount++
    }
    require.Equal(t, dcount, 0)
}

// TestWrapperBackground verifies the behaviour of the background goroutine
func TestWrapperBackground(t *testing.T) {
    ctx := context.Background()
    h, err := mocknet.New().GenPeer()
    require.NoError(t, err)

    // Create a DAG store wrapper
    dagst, w, err := NewDAGStore(config.DAGStoreConfig{
        RootDir:    t.TempDir(),
        GCInterval: config.Duration(1 * time.Millisecond),
    }, mockLotusMount{}, h)
    require.NoError(t, err)

    defer dagst.Close() //nolint:errcheck

    // Create a mock DAG store in place of the real DAG store
    mock := &mockDagStore{
        gc:      make(chan struct{}, 1),
        recover: make(chan shard.Key, 1),
        close:   make(chan struct{}, 1),
    }
    w.dagst = mock

    // Start up the wrapper
    //stm: @MARKET_DAGSTORE_START_001
    err = w.Start(ctx)
    require.NoError(t, err)

    // Expect GC to be called automatically
    //stm: @MARKET_DAGSTORE_START_002
    tctx, cancel := context.WithTimeout(ctx, time.Second)
    defer cancel()
    select {
    case <-tctx.Done():
        require.Fail(t, "failed to call GC")
    case <-mock.gc:
    }

    // Expect that when the wrapper is closed it will call close on the
    // DAG store
    //stm: @MARKET_DAGSTORE_CLOSE_001
    err = w.Close()
    require.NoError(t, err)

    tctx, cancel3 := context.WithTimeout(ctx, time.Second)
    defer cancel3()
    select {
    case <-tctx.Done():
        require.Fail(t, "failed to call close")
    case <-mock.close:
    }
}

type mockDagStore struct {
    acquireShardErr chan error
    acquireShardRes dagstore.ShardResult
    register        chan shard.Key

    gc      chan struct{}
    recover chan shard.Key
    destroy chan shard.Key
    close   chan struct{}
}

func (m *mockDagStore) GetIterableIndex(key shard.Key) (carindex.IterableIndex, error) {
    return nil, nil
}

func (m *mockDagStore) ShardsContainingMultihash(ctx context.Context, h mh.Multihash) ([]shard.Key, error) {
    return nil, nil
}

func (m *mockDagStore) GetShardKeysForCid(c cid.Cid) ([]shard.Key, error) {
    panic("implement me")
}

func (m *mockDagStore) DestroyShard(ctx context.Context, key shard.Key, out chan dagstore.ShardResult, _ dagstore.DestroyOpts) error {
    m.destroy <- key
    out <- dagstore.ShardResult{Key: key}
    return nil
}

func (m *mockDagStore) GetShardInfo(k shard.Key) (dagstore.ShardInfo, error) {
    panic("implement me")
}

func (m *mockDagStore) AllShardsInfo() dagstore.AllShardsInfo {
    panic("implement me")
}

func (m *mockDagStore) Start(_ context.Context) error {
    return nil
}

func (m *mockDagStore) RegisterShard(ctx context.Context, key shard.Key, mnt mount.Mount, out chan dagstore.ShardResult, opts dagstore.RegisterOpts) error {
    m.register <- key
    out <- dagstore.ShardResult{Key: key}
    return nil
}

func (m *mockDagStore) AcquireShard(ctx context.Context, key shard.Key, out chan dagstore.ShardResult, _ dagstore.AcquireOpts) error {
    select {
    case err := <-m.acquireShardErr:
        return err
    default:
    }

    out <- m.acquireShardRes
    return nil
}

func (m *mockDagStore) RecoverShard(ctx context.Context, key shard.Key, out chan dagstore.ShardResult, _ dagstore.RecoverOpts) error {
    m.recover <- key
    return nil
}

func (m *mockDagStore) GC(ctx context.Context) (*dagstore.GCResult, error) {
    select {
    case m.gc <- struct{}{}:
    default:
    }

    return nil, nil
}

func (m *mockDagStore) Close() error {
    m.close <- struct{}{}
    return nil
}

type mockLotusMount struct {
}

func (m mockLotusMount) Start(ctx context.Context) error {
    return nil
}

func (m mockLotusMount) FetchUnsealedPiece(context.Context, cid.Cid) (mount.Reader, error) {
    panic("implement me")
}

func (m mockLotusMount) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error) {
    panic("implement me")
}

func (m mockLotusMount) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error) {
    panic("implement me")
}

func getShardAccessor(t *testing.T) *dagstore.ShardAccessor {
    data, err := os.ReadFile("./fixtures/sample-rw-bs-v2.car")
    require.NoError(t, err)
    buff := bytes.NewReader(data)
    reader := &mount.NopCloser{Reader: buff, ReaderAt: buff, Seeker: buff}
    shardAccessor, err := dagstore.NewShardAccessor(reader, nil, nil)
    require.NoError(t, err)
    return shardAccessor
}
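Note: mockDagStore.GC above signals through a buffered channel inside a select with a default case, so a slow or absent receiver can never block the store. The same non-blocking, coalescing notification pattern in isolation (runnable, names invented):

package main

import "fmt"

func main() {
    sig := make(chan struct{}, 1)

    notify := func() {
        select {
        case sig <- struct{}{}: // signal delivered
        default: // a signal is already pending; drop this one
        }
    }

    notify()
    notify() // coalesced with the first; notify never blocks

    <-sig
    fmt.Println("got one coalesced signal")
}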
@ -1,62 +0,0 @@
package dealfilter

import (
    "bytes"
    "context"
    "encoding/json"
    "os/exec"

    "github.com/filecoin-project/go-fil-markets/retrievalmarket"
    "github.com/filecoin-project/go-fil-markets/storagemarket"

    "github.com/filecoin-project/lotus/node/modules/dtypes"
)

func CliStorageDealFilter(cmd string) dtypes.StorageDealFilter {
    return func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) {
        d := struct {
            storagemarket.MinerDeal
            DealType string
        }{
            MinerDeal: deal,
            DealType:  "storage",
        }
        return runDealFilter(ctx, cmd, d)
    }
}

func CliRetrievalDealFilter(cmd string) dtypes.RetrievalDealFilter {
    return func(ctx context.Context, deal retrievalmarket.ProviderDealState) (bool, string, error) {
        d := struct {
            retrievalmarket.ProviderDealState
            DealType string
        }{
            ProviderDealState: deal,
            DealType:          "retrieval",
        }
        return runDealFilter(ctx, cmd, d)
    }
}

func runDealFilter(ctx context.Context, cmd string, deal interface{}) (bool, string, error) {
    j, err := json.MarshalIndent(deal, "", "  ")
    if err != nil {
        return false, "", err
    }

    var out bytes.Buffer

    c := exec.Command("sh", "-c", cmd)
    c.Stdin = bytes.NewReader(j)
    c.Stdout = &out
    c.Stderr = &out

    switch err := c.Run().(type) {
    case nil:
        return true, "", nil
    case *exec.ExitError:
        return false, out.String(), nil
    default:
        return false, "filter cmd run error", err
    }
}
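Note: the contract established by runDealFilter above is that the external command receives the deal as JSON on stdin, accepts by exiting 0, and rejects by exiting non-zero (its combined output becomes the rejection reason). A hypothetical filter written in Go, only to illustrate that contract; the size threshold and policy are invented:

package main

import (
    "encoding/json"
    "fmt"
    "os"
)

func main() {
    // Decode just the fields this policy cares about; the embedded
    // ClientDealProposal promotes "Proposal" to the top level of the JSON.
    var deal struct {
        DealType string
        Proposal struct {
            PieceSize uint64
        }
    }
    if err := json.NewDecoder(os.Stdin).Decode(&deal); err != nil {
        fmt.Println("bad filter input:", err)
        os.Exit(1)
    }

    // Illustrative policy: refuse storage deals above 32 GiB.
    if deal.DealType == "storage" && deal.Proposal.PieceSize > 32<<30 {
        fmt.Println("piece too large for this miner") // becomes the rejection reason
        os.Exit(1)                                    // non-zero exit => deal rejected
    }
    os.Exit(0) // zero exit => deal accepted
}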
@ -1,16 +0,0 @@
package idxprov_test

import (
    "context"
)

type NoopMeshCreator struct {
}

func NewNoopMeshCreator() *NoopMeshCreator {
    return &NoopMeshCreator{}
}

func (mc NoopMeshCreator) Connect(ctx context.Context) error {
    return nil
}
@ -1,59 +0,0 @@
package idxprov

import (
    "context"
    "fmt"

    logging "github.com/ipfs/go-log/v2"
    "github.com/libp2p/go-libp2p/core/host"
    "github.com/libp2p/go-libp2p/core/peer"

    "github.com/filecoin-project/lotus/api/v1api"
)

var log = logging.Logger("idxprov")

const protectTag = "index-provider-gossipsub"

type MeshCreator interface {
    Connect(ctx context.Context) error
}

type Libp2pMeshCreator struct {
    fullnodeApi v1api.FullNode
    marketsHost host.Host
}

func (mc Libp2pMeshCreator) Connect(ctx context.Context) error {

    // Add the markets host ID to the list of the daemon's protected peers first,
    // before any attempt to connect to the full node over libp2p.
    marketsPeerID := mc.marketsHost.ID()
    if err := mc.fullnodeApi.NetProtectAdd(ctx, []peer.ID{marketsPeerID}); err != nil {
        return fmt.Errorf("failed to call NetProtectAdd on the full node, err: %w", err)
    }

    faddrs, err := mc.fullnodeApi.NetAddrsListen(ctx)
    if err != nil {
        return fmt.Errorf("failed to fetch full node listen addrs, err: %w", err)
    }

    // Connect from the full node, ask it to protect the connection, and protect the
    // connection on the markets end too. The connection is initiated from the full
    // node to avoid the need to expose a libp2p port on the full node.
    if err := mc.fullnodeApi.NetConnect(ctx, peer.AddrInfo{
        ID:    mc.marketsHost.ID(),
        Addrs: mc.marketsHost.Addrs(),
    }); err != nil {
        return fmt.Errorf("failed to connect to index provider host from full node: %w", err)
    }
    mc.marketsHost.ConnManager().Protect(faddrs.ID, protectTag)

    log.Debugw("successfully connected to full node and asked it to protect the indexer provider peer conn", "fullNodeInfo", faddrs.String(),
        "peerId", marketsPeerID)

    return nil
}

func NewMeshCreator(fullnodeApi v1api.FullNode, marketsHost host.Host) MeshCreator {
    return Libp2pMeshCreator{fullnodeApi, marketsHost}
}
@ -1,76 +0,0 @@
package markets

import (
    "github.com/filecoin-project/go-fil-markets/retrievalmarket"
    "github.com/filecoin-project/go-fil-markets/storagemarket"

    "github.com/filecoin-project/lotus/journal"
)

type StorageClientEvt struct {
    Event string
    Deal  storagemarket.ClientDeal
}

type StorageProviderEvt struct {
    Event string
    Deal  storagemarket.MinerDeal
}

type RetrievalClientEvt struct {
    Event string
    Deal  retrievalmarket.ClientDealState
}

type RetrievalProviderEvt struct {
    Event string
    Deal  retrievalmarket.ProviderDealState
}

// StorageClientJournaler records journal events from the storage client.
func StorageClientJournaler(j journal.Journal, evtType journal.EventType) func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
    return func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
        j.RecordEvent(evtType, func() interface{} {
            return StorageClientEvt{
                Event: storagemarket.ClientEvents[event],
                Deal:  deal,
            }
        })
    }
}

// StorageProviderJournaler records journal events from the storage provider.
func StorageProviderJournaler(j journal.Journal, evtType journal.EventType) func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
    return func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
        j.RecordEvent(evtType, func() interface{} {
            return StorageProviderEvt{
                Event: storagemarket.ProviderEvents[event],
                Deal:  deal,
            }
        })
    }
}

// RetrievalClientJournaler records journal events from the retrieval client.
func RetrievalClientJournaler(j journal.Journal, evtType journal.EventType) func(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) {
    return func(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) {
        j.RecordEvent(evtType, func() interface{} {
            return RetrievalClientEvt{
                Event: retrievalmarket.ClientEvents[event],
                Deal:  deal,
            }
        })
    }
}

// RetrievalProviderJournaler records journal events from the retrieval provider.
func RetrievalProviderJournaler(j journal.Journal, evtType journal.EventType) func(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) {
    return func(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) {
        j.RecordEvent(evtType, func() interface{} {
            return RetrievalProviderEvt{
                Event: retrievalmarket.ProviderEvents[event],
                Deal:  deal,
            }
        })
    }
}
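Note: each journaler above is a closure matching the event-subscriber signatures in go-fil-markets, so wiring one up is a single call per subsystem. A sketch of the registration, as a fragment assuming an open journal.Journal and a storagemarket.StorageProvider instance (illustrative):

// Register an event type once, then subscribe the journaler to the provider.
evtType := j.RegisterEventType("markets/storage/provider", "state_change")
unsub := provider.SubscribeToEvents(markets.StorageProviderJournaler(j, evtType))
defer unsub() // SubscribeToEvents returns an unsubscribe func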
@ -1,76 +0,0 @@
package marketevents

import (
    logging "github.com/ipfs/go-log/v2"

    datatransfer "github.com/filecoin-project/go-data-transfer/v2"
    "github.com/filecoin-project/go-fil-markets/retrievalmarket"
    "github.com/filecoin-project/go-fil-markets/storagemarket"
    "github.com/filecoin-project/go-state-types/abi"
)

var log = logging.Logger("markets")

// StorageClientLogger logs events from the storage client
func StorageClientLogger(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) {
    log.Infow("storage client event", "name", storagemarket.ClientEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message)
}

// StorageProviderLogger logs events from the storage provider
func StorageProviderLogger(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
    log.Infow("storage provider event", "name", storagemarket.ProviderEvents[event], "proposal CID", deal.ProposalCid, "state", storagemarket.DealStates[deal.State], "message", deal.Message)
}

// RetrievalClientLogger logs events from the retrieval client
func RetrievalClientLogger(event retrievalmarket.ClientEvent, deal retrievalmarket.ClientDealState) {
    method := log.Infow
    if event == retrievalmarket.ClientEventBlocksReceived {
        method = log.Debugw
    }
    method("retrieval client event", "name", retrievalmarket.ClientEvents[event], "deal ID", deal.ID, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message)
}

// RetrievalProviderLogger logs events from the retrieval provider
func RetrievalProviderLogger(event retrievalmarket.ProviderEvent, deal retrievalmarket.ProviderDealState) {
    method := log.Infow
    if event == retrievalmarket.ProviderEventBlockSent {
        method = log.Debugw
    }
    method("retrieval provider event", "name", retrievalmarket.ProviderEvents[event], "deal ID", deal.ID, "receiver", deal.Receiver, "state", retrievalmarket.DealStatuses[deal.Status], "message", deal.Message)
}

// DataTransferLogger logs events from the data transfer module
func DataTransferLogger(event datatransfer.Event, state datatransfer.ChannelState) {
    log.Debugw("data transfer event",
        "name", datatransfer.Events[event.Code],
        "status", datatransfer.Statuses[state.Status()],
        "transfer ID", state.TransferID(),
        "channel ID", state.ChannelID(),
        "sent", state.Sent(),
        "received", state.Received(),
        "queued", state.Queued(),
        "received count", state.ReceivedCidsTotal(),
        "total size", state.TotalSize(),
        "remote peer", state.OtherPeer(),
        "event message", event.Message,
        "channel message", state.Message())
}

// ReadyLogger returns a function to log the results of module initialization
func ReadyLogger(module string) func(error) {
    return func(err error) {
        if err != nil {
            log.Errorw("module initialization error", "module", module, "err", err)
        } else {
            log.Infow("module ready", "module", module)
        }
    }
}

type RetrievalEvent struct {
    Event         retrievalmarket.ClientEvent
    Status        retrievalmarket.DealStatus
    BytesReceived uint64
    FundsSpent    abi.TokenAmount
    Err           string
}
@ -1,50 +0,0 @@
package pricing

import (
	"bytes"
	"context"
	"encoding/json"
	"os/exec"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-fil-markets/retrievalmarket"

	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

func ExternalRetrievalPricingFunc(cmd string) dtypes.RetrievalPricingFunc {
	return func(ctx context.Context, pricingInput retrievalmarket.PricingInput) (retrievalmarket.Ask, error) {
		return runPricingFunc(ctx, cmd, pricingInput)
	}
}

func runPricingFunc(_ context.Context, cmd string, params interface{}) (retrievalmarket.Ask, error) {
	j, err := json.Marshal(params)
	if err != nil {
		return retrievalmarket.Ask{}, err
	}

	var out bytes.Buffer
	var errb bytes.Buffer

	c := exec.Command("sh", "-c", cmd)
	c.Stdin = bytes.NewReader(j)
	c.Stdout = &out
	c.Stderr = &errb

	switch err := c.Run().(type) {
	case nil:
		bz := out.Bytes()
		resp := retrievalmarket.Ask{}

		if err := json.Unmarshal(bz, &resp); err != nil {
			return resp, xerrors.Errorf("failed to parse pricing output %s, err=%w", string(bz), err)
		}
		return resp, nil
	case *exec.ExitError:
		return retrievalmarket.Ask{}, xerrors.Errorf("pricing func exited with error: %s", errb.String())
	default:
		return retrievalmarket.Ask{}, xerrors.Errorf("pricing func cmd run error: %w", err)
	}
}
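The protocol runPricingFunc defines is simple: the configured shell command receives the JSON-encoded retrievalmarket.PricingInput on stdin and must print a JSON-encoded retrievalmarket.Ask on stdout, exiting non-zero to report failure. A minimal sketch of such an external command, assuming the go-fil-markets types used above (including PricingInput.CurrentAsk); the pricing policy shown is purely illustrative:

package main

import (
	"encoding/json"
	"os"

	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-state-types/abi"
)

func main() {
	// Decode the PricingInput that ExternalRetrievalPricingFunc pipes in on stdin.
	var in retrievalmarket.PricingInput
	if err := json.NewDecoder(os.Stdin).Decode(&in); err != nil {
		os.Exit(1) // a non-zero exit is surfaced via *exec.ExitError above
	}

	// Illustrative policy: serve verified-deal data for free, everything else at
	// an assumed flat price per byte, keeping the miner's current interval settings.
	ask := retrievalmarket.Ask{
		PricePerByte:            abi.NewTokenAmount(2),
		UnsealPrice:             abi.NewTokenAmount(0),
		PaymentInterval:         in.CurrentAsk.PaymentInterval,
		PaymentIntervalIncrease: in.CurrentAsk.PaymentIntervalIncrease,
	}
	if in.VerifiedDeal {
		ask.PricePerByte = abi.NewTokenAmount(0)
	}

	// Whatever is written to stdout is parsed by runPricingFunc as the final Ask.
	if err := json.NewEncoder(os.Stdout).Encode(&ask); err != nil {
		os.Exit(1)
	}
}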
@ -1,127 +0,0 @@
package retrievaladapter

import (
	"context"

	"github.com/ipfs/go-cid"
	"github.com/multiformats/go-multiaddr"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-fil-markets/shared"
	"github.com/filecoin-project/go-state-types/abi"
	paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/impl/full"
	payapi "github.com/filecoin-project/lotus/node/impl/paych"
)

type retrievalClientNode struct {
	forceOffChain bool

	chainAPI full.ChainAPI
	payAPI   payapi.PaychAPI
	stateAPI full.StateAPI
}

// NewRetrievalClientNode returns a new node adapter for a retrieval client that talks to the
// Lotus Node
func NewRetrievalClientNode(forceOffChain bool, payAPI payapi.PaychAPI, chainAPI full.ChainAPI, stateAPI full.StateAPI) retrievalmarket.RetrievalClientNode {
	return &retrievalClientNode{
		forceOffChain: forceOffChain,
		chainAPI:      chainAPI,
		payAPI:        payAPI,
		stateAPI:      stateAPI,
	}
}

// GetOrCreatePaymentChannel sets up a new payment channel if one does not exist
// between a client and a miner and ensures the client has the given amount of
// funds available in the channel.
func (rcn *retrievalClientNode) GetOrCreatePaymentChannel(ctx context.Context, clientAddress address.Address, minerAddress address.Address, clientFundsAvailable abi.TokenAmount, tok shared.TipSetToken) (address.Address, cid.Cid, error) {
	// TODO: respect the provided TipSetToken (a serialized TipSetKey) when
	// querying the chain
	ci, err := rcn.payAPI.PaychGet(ctx, clientAddress, minerAddress, clientFundsAvailable, api.PaychGetOpts{
		OffChain: rcn.forceOffChain,
	})
	if err != nil {
		log.Errorw("paych get failed", "error", err)
		return address.Undef, cid.Undef, err
	}

	return ci.Channel, ci.WaitSentinel, nil
}

// AllocateLane creates a lane within a payment channel so that calls to
// CreatePaymentVoucher will automatically make vouchers only for the difference
// in total
func (rcn *retrievalClientNode) AllocateLane(ctx context.Context, paymentChannel address.Address) (uint64, error) {
	return rcn.payAPI.PaychAllocateLane(ctx, paymentChannel)
}

// CreatePaymentVoucher creates a new payment voucher in the given lane for a
// given payment channel so that all the payment vouchers in the lane add up
// to the given amount (so the payment voucher will be for the difference)
func (rcn *retrievalClientNode) CreatePaymentVoucher(ctx context.Context, paymentChannel address.Address, amount abi.TokenAmount, lane uint64, tok shared.TipSetToken) (*paychtypes.SignedVoucher, error) {
	// TODO: respect the provided TipSetToken (a serialized TipSetKey) when
	// querying the chain
	voucher, err := rcn.payAPI.PaychVoucherCreate(ctx, paymentChannel, amount, lane)
	if err != nil {
		return nil, err
	}
	if voucher.Voucher == nil {
		return nil, retrievalmarket.NewShortfallError(voucher.Shortfall)
	}
	return voucher.Voucher, nil
}

func (rcn *retrievalClientNode) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) {
	head, err := rcn.chainAPI.ChainHead(ctx)
	if err != nil {
		return nil, 0, err
	}

	return head.Key().Bytes(), head.Height(), nil
}

func (rcn *retrievalClientNode) WaitForPaymentChannelReady(ctx context.Context, messageCID cid.Cid) (address.Address, error) {
	return rcn.payAPI.PaychGetWaitReady(ctx, messageCID)
}

func (rcn *retrievalClientNode) CheckAvailableFunds(ctx context.Context, paymentChannel address.Address) (retrievalmarket.ChannelAvailableFunds, error) {

	channelAvailableFunds, err := rcn.payAPI.PaychAvailableFunds(ctx, paymentChannel)
	if err != nil {
		return retrievalmarket.ChannelAvailableFunds{}, err
	}
	return retrievalmarket.ChannelAvailableFunds{
		ConfirmedAmt:        channelAvailableFunds.ConfirmedAmt,
		PendingAmt:          channelAvailableFunds.PendingAmt,
		PendingWaitSentinel: channelAvailableFunds.PendingWaitSentinel,
		QueuedAmt:           channelAvailableFunds.QueuedAmt,
		VoucherReedeemedAmt: channelAvailableFunds.VoucherReedeemedAmt,
	}, nil
}

func (rcn *retrievalClientNode) GetKnownAddresses(ctx context.Context, p retrievalmarket.RetrievalPeer, encodedTs shared.TipSetToken) ([]multiaddr.Multiaddr, error) {
	tsk, err := types.TipSetKeyFromBytes(encodedTs)
	if err != nil {
		return nil, err
	}
	mi, err := rcn.stateAPI.StateMinerInfo(ctx, p.Address, tsk)
	if err != nil {
		return nil, err
	}
	multiaddrs := make([]multiaddr.Multiaddr, 0, len(mi.Multiaddrs))
	for _, a := range mi.Multiaddrs {
		maddr, err := multiaddr.NewMultiaddrBytes(a)
		if err != nil {
			return nil, err
		}
		multiaddrs = append(multiaddrs, maddr)
	}

	return multiaddrs, nil
}
@ -1,166 +0,0 @@
package retrievaladapter

import (
	"fmt"
	"path/filepath"
	"sync"

	bstore "github.com/ipfs/boxo/blockstore"
	"github.com/ipfs/go-cid"
	"github.com/ipld/go-car/v2/blockstore"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-fil-markets/retrievalmarket"

	"github.com/filecoin-project/lotus/api"
	lbstore "github.com/filecoin-project/lotus/blockstore"
)

// ProxyBlockstoreAccessor is an accessor that returns a fixed blockstore.
// To be used in combination with IPFS integration.
type ProxyBlockstoreAccessor struct {
	Blockstore bstore.Blockstore
}

var _ retrievalmarket.BlockstoreAccessor = (*ProxyBlockstoreAccessor)(nil)

func NewFixedBlockstoreAccessor(bs bstore.Blockstore) retrievalmarket.BlockstoreAccessor {
	return &ProxyBlockstoreAccessor{Blockstore: bs}
}

func (p *ProxyBlockstoreAccessor) Get(_ retrievalmarket.DealID, _ retrievalmarket.PayloadCID) (bstore.Blockstore, error) {
	return p.Blockstore, nil
}

func (p *ProxyBlockstoreAccessor) Done(_ retrievalmarket.DealID) error {
	return nil
}

func NewAPIBlockstoreAdapter(sub retrievalmarket.BlockstoreAccessor) *APIBlockstoreAccessor {
	return &APIBlockstoreAccessor{
		sub:          sub,
		retrStores:   map[retrievalmarket.DealID]api.RemoteStoreID{},
		remoteStores: map[api.RemoteStoreID]bstore.Blockstore{},
	}
}

// APIBlockstoreAccessor adds support to API-specified remote blockstores
type APIBlockstoreAccessor struct {
	sub retrievalmarket.BlockstoreAccessor

	retrStores   map[retrievalmarket.DealID]api.RemoteStoreID
	remoteStores map[api.RemoteStoreID]bstore.Blockstore

	accessLk sync.Mutex
}

func (a *APIBlockstoreAccessor) Get(id retrievalmarket.DealID, payloadCID retrievalmarket.PayloadCID) (bstore.Blockstore, error) {
	a.accessLk.Lock()
	defer a.accessLk.Unlock()

	as, has := a.retrStores[id]
	if !has {
		return a.sub.Get(id, payloadCID)
	}

	return a.remoteStores[as], nil
}

func (a *APIBlockstoreAccessor) Done(id retrievalmarket.DealID) error {
	a.accessLk.Lock()
	defer a.accessLk.Unlock()

	if _, has := a.retrStores[id]; has {
		delete(a.retrStores, id)
		return nil
	}
	return a.sub.Done(id)
}

func (a *APIBlockstoreAccessor) RegisterDealToRetrievalStore(id retrievalmarket.DealID, sid api.RemoteStoreID) error {
	a.accessLk.Lock()
	defer a.accessLk.Unlock()

	if _, has := a.retrStores[id]; has {
		return xerrors.Errorf("apistore for deal %d already registered", id)
	}
	if _, has := a.remoteStores[sid]; !has {
		return xerrors.Errorf("remote store not found")
	}

	a.retrStores[id] = sid
	return nil
}

func (a *APIBlockstoreAccessor) RegisterApiStore(sid api.RemoteStoreID, st *lbstore.NetworkStore) error {
	a.accessLk.Lock()
	defer a.accessLk.Unlock()

	if _, has := a.remoteStores[sid]; has {
		return xerrors.Errorf("remote store already registered with this uuid")
	}

	a.remoteStores[sid] = st

	st.OnClose(func() {
		a.accessLk.Lock()
		defer a.accessLk.Unlock()

		if _, has := a.remoteStores[sid]; has {
			delete(a.remoteStores, sid)
		}
	})
	return nil
}

var _ retrievalmarket.BlockstoreAccessor = &APIBlockstoreAccessor{}

type CARBlockstoreAccessor struct {
	rootdir string
	lk      sync.Mutex
	open    map[retrievalmarket.DealID]*blockstore.ReadWrite
}

var _ retrievalmarket.BlockstoreAccessor = (*CARBlockstoreAccessor)(nil)

func NewCARBlockstoreAccessor(rootdir string) *CARBlockstoreAccessor {
	return &CARBlockstoreAccessor{
		rootdir: rootdir,
		open:    make(map[retrievalmarket.DealID]*blockstore.ReadWrite),
	}
}

func (c *CARBlockstoreAccessor) Get(id retrievalmarket.DealID, payloadCid retrievalmarket.PayloadCID) (bstore.Blockstore, error) {
	c.lk.Lock()
	defer c.lk.Unlock()

	bs, ok := c.open[id]
	if ok {
		return bs, nil
	}

	path := c.PathFor(id)
	bs, err := blockstore.OpenReadWrite(path, []cid.Cid{payloadCid}, blockstore.UseWholeCIDs(true))
	if err != nil {
		return nil, err
	}
	c.open[id] = bs
	return bs, nil
}

func (c *CARBlockstoreAccessor) Done(id retrievalmarket.DealID) error {
	c.lk.Lock()
	defer c.lk.Unlock()

	bs, ok := c.open[id]
	if !ok {
		return nil
	}

	delete(c.open, id)
	return bs.Finalize()
}

func (c *CARBlockstoreAccessor) PathFor(id retrievalmarket.DealID) string {
	return filepath.Join(c.rootdir, fmt.Sprintf("%d.car", id))
}
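For orientation, the CARBlockstoreAccessor lifecycle is: the first Get for a deal opens (or creates) rootdir/<dealID>.car as a read-write CARv2 blockstore keyed by the payload root, later Gets return the cached handle, and Done finalizes the CAR index and drops the handle. A hedged usage sketch, assuming the same imports as the file above; the directory and deal ID are illustrative:

// Sketch only: assumes the imports of the file above; values are illustrative.
func retrieveIntoCAR(payloadCid cid.Cid) error {
	acc := NewCARBlockstoreAccessor("/var/tmp/retrievals")
	id := retrievalmarket.DealID(7)

	// Opens (or creates) /var/tmp/retrievals/7.car rooted at the payload CID.
	bs, err := acc.Get(id, payloadCid)
	if err != nil {
		return err
	}
	_ = bs // blocks received during the transfer would be Put into bs here

	// Done finalizes the CARv2 (writes its index) and forgets the open handle.
	return acc.Done(id)
}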
@ -1,108 +0,0 @@
package retrievaladapter

import (
	"context"

	"github.com/hashicorp/go-multierror"
	"github.com/ipfs/go-cid"
	logging "github.com/ipfs/go-log/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-fil-markets/shared"
	"github.com/filecoin-project/go-state-types/abi"
	paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych"

	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/chain/types"
)

var log = logging.Logger("retrievaladapter")

type retrievalProviderNode struct {
	full v1api.FullNode
}

var _ retrievalmarket.RetrievalProviderNode = (*retrievalProviderNode)(nil)

// NewRetrievalProviderNode returns a new node adapter for a retrieval provider that talks to the
// Lotus Node
func NewRetrievalProviderNode(full v1api.FullNode) retrievalmarket.RetrievalProviderNode {
	return &retrievalProviderNode{full: full}
}

func (rpn *retrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) {
	tsk, err := types.TipSetKeyFromBytes(tok)
	if err != nil {
		return address.Undef, err
	}

	mi, err := rpn.full.StateMinerInfo(ctx, miner, tsk)
	return mi.Worker, err
}

func (rpn *retrievalProviderNode) SavePaymentVoucher(ctx context.Context, paymentChannel address.Address, voucher *paychtypes.SignedVoucher, proof []byte, expectedAmount abi.TokenAmount, tok shared.TipSetToken) (abi.TokenAmount, error) {
	// TODO: respect the provided TipSetToken (a serialized TipSetKey) when
	// querying the chain
	added, err := rpn.full.PaychVoucherAdd(ctx, paymentChannel, voucher, proof, expectedAmount)
	return added, err
}

func (rpn *retrievalProviderNode) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) {
	head, err := rpn.full.ChainHead(ctx)
	if err != nil {
		return nil, 0, err
	}

	return head.Key().Bytes(), head.Height(), nil
}

// GetRetrievalPricingInput takes a set of candidate storage deals that can serve a retrieval request,
// and returns a minimally populated PricingInput. This PricingInput should be enhanced
// with more data, and passed to the pricing function to determine the final quoted price.
func (rpn *retrievalProviderNode) GetRetrievalPricingInput(ctx context.Context, pieceCID cid.Cid, storageDeals []abi.DealID) (retrievalmarket.PricingInput, error) {
	resp := retrievalmarket.PricingInput{}

	head, err := rpn.full.ChainHead(ctx)
	if err != nil {
		return resp, xerrors.Errorf("failed to get chain head: %w", err)
	}
	tsk := head.Key()

	var mErr error

	for _, dealID := range storageDeals {
		ds, err := rpn.full.StateMarketStorageDeal(ctx, dealID, tsk)
		if err != nil {
			log.Warnf("failed to look up deal %d on chain: err=%w", dealID, err)
			mErr = multierror.Append(mErr, err)
			continue
		}
		if ds.Proposal.VerifiedDeal {
			resp.VerifiedDeal = true
		}

		if ds.Proposal.PieceCID.Equals(pieceCID) {
			resp.PieceSize = ds.Proposal.PieceSize.Unpadded()
		}

		// If we've discovered a verified deal with the required PieceCID, we don't need
		// to lookup more deals and we're done.
		if resp.VerifiedDeal && resp.PieceSize != 0 {
			break
		}
	}

	// Note: The piece size can never actually be zero. We only use it here
	// to assert that we didn't find a matching piece.
	if resp.PieceSize == 0 {
		if mErr == nil {
			return resp, xerrors.New("failed to find matching piece")
		}

		return resp, xerrors.Errorf("failed to fetch storage deal state: %w", mErr)
	}

	return resp, nil
}
@ -1,206 +0,0 @@
// stm: #unit
package retrievaladapter

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/ipfs/go-cid"
	"github.com/stretchr/testify/require"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	testnet "github.com/filecoin-project/go-fil-markets/shared_testutil"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/mocks"
	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
	"github.com/filecoin-project/lotus/chain/types"
)

func TestGetPricingInput(t *testing.T) {
	//stm: @CHAIN_STATE_MARKET_STORAGE_DEAL_001
	ctx := context.Background()
	tsk := &types.TipSet{}
	key := tsk.Key()

	pcid := testnet.GenerateCids(1)[0]
	deals := []abi.DealID{1, 2}
	paddedSize := abi.PaddedPieceSize(128)
	unpaddedSize := paddedSize.Unpadded()

	tcs := map[string]struct {
		pieceCid cid.Cid
		deals    []abi.DealID
		fFnc     func(node *mocks.MockFullNode)

		expectedErrorStr  string
		expectedVerified  bool
		expectedPieceSize abi.UnpaddedPieceSize
	}{
		"error when fails to fetch chain head": {
			fFnc: func(n *mocks.MockFullNode) {
				n.EXPECT().ChainHead(gomock.Any()).Return(tsk, xerrors.New("chain head error")).Times(1)
			},
			expectedErrorStr: "chain head error",
		},

		"error when no piece matches": {
			fFnc: func(n *mocks.MockFullNode) {
				out1 := &api.MarketDeal{
					Proposal: market.DealProposal{
						PieceCID: testnet.GenerateCids(1)[0],
					},
				}
				out2 := &api.MarketDeal{
					Proposal: market.DealProposal{
						PieceCID: testnet.GenerateCids(1)[0],
					},
				}

				n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1)
				gomock.InOrder(
					n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, nil),
					n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil),
				)

			},
			expectedErrorStr: "failed to find matching piece",
		},

		"error when fails to fetch deal state": {
			fFnc: func(n *mocks.MockFullNode) {
				out1 := &api.MarketDeal{
					Proposal: market.DealProposal{
						PieceCID:  pcid,
						PieceSize: paddedSize,
					},
				}
				out2 := &api.MarketDeal{
					Proposal: market.DealProposal{
						PieceCID:     testnet.GenerateCids(1)[0],
						VerifiedDeal: true,
					},
				}

				n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1)
				gomock.InOrder(
					n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, xerrors.New("error 1")),
					n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, xerrors.New("error 2")),
				)

			},
			expectedErrorStr: "failed to fetch storage deal state",
		},

		"verified is true even if one deal is verified and we get the correct piecesize": {
			fFnc: func(n *mocks.MockFullNode) {
				out1 := &api.MarketDeal{
					Proposal: market.DealProposal{
						PieceCID:  pcid,
						PieceSize: paddedSize,
					},
				}
				out2 := &api.MarketDeal{
					Proposal: market.DealProposal{
						PieceCID:     testnet.GenerateCids(1)[0],
						VerifiedDeal: true,
					},
				}

				n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1)
				gomock.InOrder(
					n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, nil),
					n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil),
				)

			},
			expectedPieceSize: unpaddedSize,
			expectedVerified:  true,
		},

		"success even if one deal state fetch errors out but the other deal is verified and has the required piececid": {
			fFnc: func(n *mocks.MockFullNode) {
				out1 := &api.MarketDeal{
					Proposal: market.DealProposal{
						PieceCID: testnet.GenerateCids(1)[0],
					},
				}
				out2 := &api.MarketDeal{
					Proposal: market.DealProposal{
						PieceCID:     pcid,
						PieceSize:    paddedSize,
						VerifiedDeal: true,
					},
				}

				n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1)
				gomock.InOrder(
					n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, xerrors.New("some error")),
					n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil),
				)

			},
			expectedPieceSize: unpaddedSize,
			expectedVerified:  true,
		},

		"verified is false if both deals are unverified and we get the correct piece size": {
			fFnc: func(n *mocks.MockFullNode) {
				out1 := &api.MarketDeal{
					Proposal: market.DealProposal{
						PieceCID:     pcid,
						PieceSize:    paddedSize,
						VerifiedDeal: false,
					},
				}
				out2 := &api.MarketDeal{
					Proposal: market.DealProposal{
						PieceCID:     testnet.GenerateCids(1)[0],
						VerifiedDeal: false,
					},
				}

				n.EXPECT().ChainHead(gomock.Any()).Return(tsk, nil).Times(1)
				gomock.InOrder(
					n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[0], key).Return(out1, nil),
					n.EXPECT().StateMarketStorageDeal(gomock.Any(), deals[1], key).Return(out2, nil),
				)

			},
			expectedPieceSize: unpaddedSize,
			expectedVerified:  false,
		},
	}

	for name, tc := range tcs {
		tc := tc
		t.Run(name, func(t *testing.T) {
			mockCtrl := gomock.NewController(t)
			// when test is done, assert expectations on all mock objects.
			defer mockCtrl.Finish()

			mockFull := mocks.NewMockFullNode(mockCtrl)
			rpn := &retrievalProviderNode{
				full: mockFull,
			}
			if tc.fFnc != nil {
				tc.fFnc(mockFull)
			}

			resp, err := rpn.GetRetrievalPricingInput(ctx, pcid, deals)

			if tc.expectedErrorStr != "" {
				require.Error(t, err)
				require.Contains(t, err.Error(), tc.expectedErrorStr)
				require.Equal(t, retrievalmarket.PricingInput{}, resp)
			} else {
				require.NoError(t, err)
				require.Equal(t, tc.expectedPieceSize, resp.PieceSize)
				require.Equal(t, tc.expectedVerified, resp.VerifiedDeal)
			}
		})
	}
}
@ -1,136 +0,0 @@
package sectoraccessor

import (
	"context"
	"io"

	"github.com/ipfs/go-cid"
	logging "github.com/ipfs/go-log/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/dagstore/mount"
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/markets/dagstore"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	"github.com/filecoin-project/lotus/storage/sealer"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
	"github.com/filecoin-project/lotus/storage/sectorblocks"
)

var log = logging.Logger("sectoraccessor")

type sectorAccessor struct {
	maddr address.Address
	secb  sectorblocks.SectorBuilder
	pp    sealer.PieceProvider
	full  v1api.FullNode
}

var _ retrievalmarket.SectorAccessor = (*sectorAccessor)(nil)

func NewSectorAccessor(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sealer.PieceProvider, full v1api.FullNode) dagstore.SectorAccessor {
	return &sectorAccessor{address.Address(maddr), secb, pp, full}
}

func (sa *sectorAccessor) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) {
	return sa.UnsealSectorAt(ctx, sectorID, pieceOffset, length)
}

func (sa *sectorAccessor) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) {
	log.Debugf("get sector %d, pieceOffset %d, length %d", sectorID, pieceOffset, length)
	si, err := sa.sectorsStatus(ctx, sectorID, false)
	if err != nil {
		return nil, err
	}

	mid, err := address.IDFromAddress(sa.maddr)
	if err != nil {
		return nil, err
	}

	ref := storiface.SectorRef{
		ID: abi.SectorID{
			Miner:  abi.ActorID(mid),
			Number: sectorID,
		},
		ProofType: si.SealProof,
	}

	var commD cid.Cid
	if si.CommD != nil {
		commD = *si.CommD
	}

	// Get a reader for the piece, unsealing the piece if necessary
	log.Debugf("read piece in sector %d, pieceOffset %d, length %d from miner %d", sectorID, pieceOffset, length, mid)
	r, unsealed, err := sa.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(pieceOffset), length, si.Ticket.Value, commD)
	if err != nil {
		return nil, xerrors.Errorf("failed to unseal piece from sector %d: %w", sectorID, err)
	}
	_ = unsealed // todo: use

	return r, nil
}

func (sa *sectorAccessor) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) {
	si, err := sa.sectorsStatus(ctx, sectorID, true)
	if err != nil {
		return false, xerrors.Errorf("failed to get sector info: %w", err)
	}

	mid, err := address.IDFromAddress(sa.maddr)
	if err != nil {
		return false, err
	}

	ref := storiface.SectorRef{
		ID: abi.SectorID{
			Miner:  abi.ActorID(mid),
			Number: sectorID,
		},
		ProofType: si.SealProof,
	}

	log.Debugf("will call IsUnsealed now sector=%+v, offset=%d, size=%d", sectorID, offset, length)
	return sa.pp.IsUnsealed(ctx, ref, storiface.UnpaddedByteIndex(offset), length)
}

func (sa *sectorAccessor) sectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) {
	sInfo, err := sa.secb.SectorsStatus(ctx, sid, false)
	if err != nil {
		return api.SectorInfo{}, err
	}

	if !showOnChainInfo {
		return sInfo, nil
	}

	onChainInfo, err := sa.full.StateSectorGetInfo(ctx, sa.maddr, sid, types.EmptyTSK)
	if err != nil {
		return sInfo, err
	}
	if onChainInfo == nil {
		return sInfo, nil
	}
	sInfo.SealProof = onChainInfo.SealProof
	sInfo.Activation = onChainInfo.Activation
	sInfo.Expiration = onChainInfo.Expiration
	sInfo.DealWeight = onChainInfo.DealWeight
	sInfo.VerifiedDealWeight = onChainInfo.VerifiedDealWeight
	sInfo.InitialPledge = onChainInfo.InitialPledge

	ex, err := sa.full.StateSectorExpiration(ctx, sa.maddr, sid, types.EmptyTSK)
	if err != nil {
		return sInfo, nil
	}
	sInfo.OnTime = ex.OnTime
	sInfo.Early = ex.Early

	return sInfo, nil
}
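To see how this adapter is driven, a retrieval path would roughly do the following: probe IsUnsealed to learn whether serving the piece will trigger an unseal, then stream the unpadded byte range. A hedged sketch; sa is assumed to be the dagstore.SectorAccessor returned by NewSectorAccessor, and the sector/offset/length values are illustrative:

// Sketch only: assumes the imports of the file above.
func readPieceRange(ctx context.Context, sa dagstore.SectorAccessor) error {
	sector := abi.SectorNumber(12)
	offset := abi.UnpaddedPieceSize(0)
	length := abi.UnpaddedPieceSize(2032) // 2032 unpadded bytes fill a 2 KiB padded piece

	unsealed, err := sa.IsUnsealed(ctx, sector, offset, length)
	if err != nil {
		return err
	}
	if !unsealed {
		log.Debugf("sector %d will be unsealed on first read", sector)
	}

	// UnsealSectorAt returns a seekable mount.Reader over the requested range.
	r, err := sa.UnsealSectorAt(ctx, sector, offset, length)
	if err != nil {
		return err
	}
	defer r.Close() // sketch ignores the Close error

	_, err = io.Copy(io.Discard, r)
	return err
}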
@ -1,55 +0,0 @@
package storageadapter

import (
	"context"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
)

type apiWrapper struct {
	api interface {
		StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error)
		ChainReadObj(context.Context, cid.Cid) ([]byte, error)
		ChainHasObj(context.Context, cid.Cid) (bool, error)
		ChainPutObj(context.Context, blocks.Block) error
	}
}

func (ca *apiWrapper) diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) {
	store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(ca.api)))

	preAct, err := ca.api.StateGetActor(ctx, actor, pre)
	if err != nil {
		return nil, xerrors.Errorf("getting pre actor: %w", err)
	}
	curAct, err := ca.api.StateGetActor(ctx, actor, cur)
	if err != nil {
		return nil, xerrors.Errorf("getting cur actor: %w", err)
	}

	preSt, err := miner.Load(store, preAct)
	if err != nil {
		return nil, xerrors.Errorf("loading miner actor: %w", err)
	}
	curSt, err := miner.Load(store, curAct)
	if err != nil {
		return nil, xerrors.Errorf("loading miner actor: %w", err)
	}

	diff, err := miner.DiffPreCommits(preSt, curSt)
	if err != nil {
		return nil, xerrors.Errorf("diff precommits: %w", err)
	}

	return diff, err
}
@ -1,446 +0,0 @@
package storageadapter

// this file implements storagemarket.StorageClientNode

import (
	"bytes"
	"context"

	"github.com/ipfs/go-cid"
	"go.uber.org/fx"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	cborutil "github.com/filecoin-project/go-cbor-util"
	"github.com/filecoin-project/go-fil-markets/shared"
	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"
	"github.com/filecoin-project/go-state-types/crypto"
	"github.com/filecoin-project/go-state-types/exitcode"
	builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	marketactor "github.com/filecoin-project/lotus/chain/actors/builtin/market"
	"github.com/filecoin-project/lotus/chain/events"
	"github.com/filecoin-project/lotus/chain/events/state"
	"github.com/filecoin-project/lotus/chain/market"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/lib/sigs"
	"github.com/filecoin-project/lotus/markets/utils"
	"github.com/filecoin-project/lotus/node/impl/full"
	"github.com/filecoin-project/lotus/node/modules/helpers"
)

type ClientNodeAdapter struct {
	*clientApi

	fundmgr   *market.FundManager
	ev        *events.Events
	dsMatcher *dealStateMatcher
	scMgr     *SectorCommittedManager
}

type clientApi struct {
	full.ChainAPI
	full.StateAPI
	full.MpoolAPI
}

func NewClientNodeAdapter(mctx helpers.MetricsCtx, lc fx.Lifecycle, stateapi full.StateAPI, chain full.ChainAPI, mpool full.MpoolAPI, fundmgr *market.FundManager) (storagemarket.StorageClientNode, error) {
	capi := &clientApi{chain, stateapi, mpool}
	ctx := helpers.LifecycleCtx(mctx, lc)

	ev, err := events.NewEvents(ctx, capi)
	if err != nil {
		return nil, err
	}
	a := &ClientNodeAdapter{
		clientApi: capi,

		fundmgr:   fundmgr,
		ev:        ev,
		dsMatcher: newDealStateMatcher(state.NewStatePredicates(state.WrapFastAPI(capi))),
	}
	a.scMgr = NewSectorCommittedManager(ev, a, &apiWrapper{api: capi})
	return a, nil
}

func (c *ClientNodeAdapter) ListStorageProviders(ctx context.Context, encodedTs shared.TipSetToken) ([]*storagemarket.StorageProviderInfo, error) {
	tsk, err := types.TipSetKeyFromBytes(encodedTs)
	if err != nil {
		return nil, err
	}

	addresses, err := c.StateListMiners(ctx, tsk)
	if err != nil {
		return nil, err
	}

	var out []*storagemarket.StorageProviderInfo

	for _, addr := range addresses {
		mi, err := c.GetMinerInfo(ctx, addr, encodedTs)
		if err != nil {
			return nil, err
		}

		out = append(out, mi)
	}

	return out, nil
}

func (c *ClientNodeAdapter) VerifySignature(ctx context.Context, sig crypto.Signature, addr address.Address, input []byte, encodedTs shared.TipSetToken) (bool, error) {
	addr, err := c.StateAccountKey(ctx, addr, types.EmptyTSK)
	if err != nil {
		return false, err
	}

	err = sigs.Verify(&sig, addr, input)
	return err == nil, err
}

// Adds funds with the StorageMarketActor for a storage participant. Used by both providers and clients.
func (c *ClientNodeAdapter) AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) {
	// (Provider Node API)
	smsg, err := c.MpoolPushMessage(ctx, &types.Message{
		To:     marketactor.Address,
		From:   addr,
		Value:  amount,
		Method: builtin6.MethodsMarket.AddBalance,
	}, nil)
	if err != nil {
		return cid.Undef, err
	}

	return smsg.Cid(), nil
}

func (c *ClientNodeAdapter) ReserveFunds(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) {
	return c.fundmgr.Reserve(ctx, wallet, addr, amt)
}

func (c *ClientNodeAdapter) ReleaseFunds(ctx context.Context, addr address.Address, amt abi.TokenAmount) error {
	return c.fundmgr.Release(addr, amt)
}

func (c *ClientNodeAdapter) GetBalance(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (storagemarket.Balance, error) {
	tsk, err := types.TipSetKeyFromBytes(encodedTs)
	if err != nil {
		return storagemarket.Balance{}, err
	}

	bal, err := c.StateMarketBalance(ctx, addr, tsk)
	if err != nil {
		return storagemarket.Balance{}, err
	}

	return utils.ToSharedBalance(bal), nil
}

// ValidatePublishedDeal validates that the provided deal has appeared on chain and references the same ClientDeal
// returns the Deal id if there is no error
// TODO: Don't return deal ID
func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal storagemarket.ClientDeal) (abi.DealID, error) {
	log.Infow("DEAL ACCEPTED!")

	pubmsg, err := c.ChainGetMessage(ctx, *deal.PublishMessage)
	if err != nil {
		return 0, xerrors.Errorf("getting deal publish message: %w", err)
	}

	mi, err := c.StateMinerInfo(ctx, deal.Proposal.Provider, types.EmptyTSK)
	if err != nil {
		return 0, xerrors.Errorf("getting miner worker failed: %w", err)
	}

	fromid, err := c.StateLookupID(ctx, pubmsg.From, types.EmptyTSK)
	if err != nil {
		return 0, xerrors.Errorf("failed to resolve from msg ID addr: %w", err)
	}

	var pubOk bool
	pubAddrs := append([]address.Address{mi.Worker, mi.Owner}, mi.ControlAddresses...)
	for _, a := range pubAddrs {
		if fromid == a {
			pubOk = true
			break
		}
	}

	if !pubOk {
		return 0, xerrors.Errorf("deal wasn't published by storage provider: from=%s, provider=%s,%+v", pubmsg.From, deal.Proposal.Provider, pubAddrs)
	}

	if pubmsg.To != marketactor.Address {
		return 0, xerrors.Errorf("deal publish message wasn't set to StorageMarket actor (to=%s)", pubmsg.To)
	}

	if pubmsg.Method != builtin6.MethodsMarket.PublishStorageDeals {
		return 0, xerrors.Errorf("deal publish message called incorrect method (method=%s)", pubmsg.Method)
	}

	var params markettypes.PublishStorageDealsParams
	if err := params.UnmarshalCBOR(bytes.NewReader(pubmsg.Params)); err != nil {
		return 0, err
	}

	dealIdx := -1
	for i, storageDeal := range params.Deals {
		// TODO: make it less hacky
		sd := storageDeal
		eq, err := cborutil.Equals(&deal.ClientDealProposal, &sd)
		if err != nil {
			return 0, err
		}
		if eq {
			dealIdx = i
			break
		}
	}

	if dealIdx == -1 {
		return 0, xerrors.Errorf("deal publish didn't contain our deal (message cid: %s)", deal.PublishMessage)
	}

	// TODO: timeout
	ret, err := c.StateWaitMsg(ctx, *deal.PublishMessage, build.MessageConfidence, api.LookbackNoLimit, true)
	if err != nil {
		return 0, xerrors.Errorf("waiting for deal publish message: %w", err)
	}
	if ret.Receipt.ExitCode != 0 {
		return 0, xerrors.Errorf("deal publish failed: exit=%d", ret.Receipt.ExitCode)
	}

	nv, err := c.StateNetworkVersion(ctx, ret.TipSet)
	if err != nil {
		return 0, xerrors.Errorf("getting network version: %w", err)
	}

	res, err := marketactor.DecodePublishStorageDealsReturn(ret.Receipt.Return, nv)
	if err != nil {
		return 0, xerrors.Errorf("decoding deal publish return: %w", err)
	}

	dealIDs, err := res.DealIDs()
	if err != nil {
		return 0, xerrors.Errorf("getting dealIDs: %w", err)
	}

	if dealIdx >= len(params.Deals) {
		return 0, xerrors.Errorf(
			"deal index %d out of bounds of deals (len %d) in publish deals message %s",
			dealIdx, len(params.Deals), pubmsg.Cid())
	}

	valid, outIdx, err := res.IsDealValid(uint64(dealIdx))
	if err != nil {
		return 0, xerrors.Errorf("determining deal validity: %w", err)
	}

	if !valid {
		return 0, xerrors.New("deal was invalid at publication")
	}

	return dealIDs[outIdx], nil
}

var clientOverestimation = struct {
	numerator   int64
	denominator int64
}{
	numerator:   12,
	denominator: 10,
}

func (c *ClientNodeAdapter) DealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, isVerified bool) (abi.TokenAmount, abi.TokenAmount, error) {
	bounds, err := c.StateDealProviderCollateralBounds(ctx, size, isVerified, types.EmptyTSK)
	if err != nil {
		return abi.TokenAmount{}, abi.TokenAmount{}, err
	}

	min := big.Mul(bounds.Min, big.NewInt(clientOverestimation.numerator))
	min = big.Div(min, big.NewInt(clientOverestimation.denominator))
	return min, bounds.Max, nil
}
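The 12/10 ratio above makes the client over-reserve the chain's minimum provider collateral by 20%, so a proposal built against one tipset still clears the bound if the network's minimum drifts upward before the deal is published. A worked sketch of the arithmetic, using the go-state-types big package imported above (values illustrative):

// Illustrative: a chain-reported minimum of 100 attoFIL becomes a proposed
// minimum of 100 * 12 / 10 = 120 attoFIL; the Max bound passes through untouched.
chainMin := big.NewInt(100)
proposedMin := big.Div(big.Mul(chainMin, big.NewInt(12)), big.NewInt(10)) // = 120
_ = proposedMin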
// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer)
func (c *ClientNodeAdapter) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal markettypes.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorPreCommittedCallback) error {
	return c.scMgr.OnDealSectorPreCommitted(ctx, provider, proposal, *publishCid, cb)
}

// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer)
func (c *ClientNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, sectorNumber abi.SectorNumber, proposal markettypes.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorCommittedCallback) error {
	return c.scMgr.OnDealSectorCommitted(ctx, provider, sectorNumber, proposal, *publishCid, cb)
}

// TODO: Replace dealID parameter with DealProposal
func (c *ClientNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired storagemarket.DealExpiredCallback, onDealSlashed storagemarket.DealSlashedCallback) error {
	head, err := c.ChainHead(ctx)
	if err != nil {
		return xerrors.Errorf("client: failed to get chain head: %w", err)
	}

	sd, err := c.StateMarketStorageDeal(ctx, dealID, head.Key())
	if err != nil {
		return xerrors.Errorf("client: failed to look up deal %d on chain: %w", dealID, err)
	}

	// Called immediately to check if the deal has already expired or been slashed
	checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) {
		if ts == nil {
			// keep listening for events
			return false, true, nil
		}

		// Check if the deal has already expired
		if sd.Proposal.EndEpoch <= ts.Height() {
			onDealExpired(nil)
			return true, false, nil
		}

		// If there is no deal assume it's already been slashed
		if sd.State.SectorStartEpoch < 0 {
			onDealSlashed(ts.Height(), nil)
			return true, false, nil
		}

		// No events have occurred yet, so return
		// done: false, more: true (keep listening for events)
		return false, true, nil
	}

	// Called when there was a match against the state change we're looking for
	// and the chain has advanced to the confidence height
	stateChanged := func(ts *types.TipSet, ts2 *types.TipSet, states events.StateChange, h abi.ChainEpoch) (more bool, err error) {
		// Check if the deal has already expired
		if ts2 == nil || sd.Proposal.EndEpoch <= ts2.Height() {
			onDealExpired(nil)
			return false, nil
		}

		// Timeout waiting for state change
		if states == nil {
			log.Error("timed out waiting for deal expiry")
			return false, nil
		}

		changedDeals, ok := states.(state.ChangedDeals)
		if !ok {
			panic("Expected state.ChangedDeals")
		}

		deal, ok := changedDeals[dealID]
		if !ok {
			// No change to deal
			return true, nil
		}

		// Deal was slashed
		if deal.To == nil {
			onDealSlashed(ts2.Height(), nil)
			return false, nil
		}

		return true, nil
	}

	// Called when there was a chain reorg and the state change was reverted
	revert := func(ctx context.Context, ts *types.TipSet) error {
		// TODO: Is it ok to just ignore this?
		log.Warn("deal state reverted; TODO: actually handle this!")
		return nil
	}

	// Watch for state changes to the deal
	match := c.dsMatcher.matcher(ctx, dealID)

	// Wait until after the end epoch for the deal and then timeout
	timeout := (sd.Proposal.EndEpoch - head.Height()) + 1
	if err := c.ev.StateChanged(checkFunc, stateChanged, revert, int(build.MessageConfidence)+1, timeout, match); err != nil {
		return xerrors.Errorf("failed to set up state changed handler: %w", err)
	}

	return nil
}

func (c *ClientNodeAdapter) SignProposal(ctx context.Context, signer address.Address, proposal markettypes.DealProposal) (*markettypes.ClientDealProposal, error) {
	// TODO: output spec signed proposal
	buf, err := cborutil.Dump(&proposal)
	if err != nil {
		return nil, err
	}

	signer, err = c.StateAccountKey(ctx, signer, types.EmptyTSK)
	if err != nil {
		return nil, err
	}

	sig, err := c.Wallet.WalletSign(ctx, signer, buf, api.MsgMeta{
		Type: api.MTDealProposal,
	})
	if err != nil {
		return nil, err
	}

	return &markettypes.ClientDealProposal{
		Proposal:        proposal,
		ClientSignature: *sig,
	}, nil
}

func (c *ClientNodeAdapter) GetDefaultWalletAddress(ctx context.Context) (address.Address, error) {
	addr, err := c.DefWallet.GetDefault()
	return addr, err
}

func (c *ClientNodeAdapter) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) {
	head, err := c.ChainHead(ctx)
	if err != nil {
		return nil, 0, err
	}

	return head.Key().Bytes(), head.Height(), nil
}

func (c *ClientNodeAdapter) WaitForMessage(ctx context.Context, mcid cid.Cid, cb func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error) error {
	receipt, err := c.StateWaitMsg(ctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true)
	if err != nil {
		return cb(0, nil, cid.Undef, err)
	}
	return cb(receipt.Receipt.ExitCode, receipt.Receipt.Return, receipt.Message, nil)
}

func (c *ClientNodeAdapter) GetMinerInfo(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (*storagemarket.StorageProviderInfo, error) {
	tsk, err := types.TipSetKeyFromBytes(encodedTs)
	if err != nil {
		return nil, err
	}
	mi, err := c.StateMinerInfo(ctx, addr, tsk)
	if err != nil {
		return nil, err
	}

	out := utils.NewStorageProviderInfo(addr, mi.Worker, mi.SectorSize, *mi.PeerId, mi.Multiaddrs)
	return &out, nil
}

func (c *ClientNodeAdapter) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) {
	signer, err := c.StateAccountKey(ctx, signer, types.EmptyTSK)
	if err != nil {
		return nil, err
	}

	localSignature, err := c.Wallet.WalletSign(ctx, signer, b, api.MsgMeta{
		Type: api.MTUnknown, // TODO: pass type here
	})
	if err != nil {
		return nil, err
	}
	return localSignature, nil
}

var _ storagemarket.StorageClientNode = &ClientNodeAdapter{}
@ -1,102 +0,0 @@
package storageadapter

import (
	"sync"

	blockstore "github.com/ipfs/boxo/blockstore"
	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/go-fil-markets/stores"

	"github.com/filecoin-project/lotus/node/repo/imports"
)

// ProxyBlockstoreAccessor is an accessor that returns a fixed blockstore.
// To be used in combination with IPFS integration.
type ProxyBlockstoreAccessor struct {
	Blockstore blockstore.Blockstore
}

var _ storagemarket.BlockstoreAccessor = (*ProxyBlockstoreAccessor)(nil)

func NewFixedBlockstoreAccessor(bs blockstore.Blockstore) storagemarket.BlockstoreAccessor {
	return &ProxyBlockstoreAccessor{Blockstore: bs}
}

func (p *ProxyBlockstoreAccessor) Get(cid storagemarket.PayloadCID) (blockstore.Blockstore, error) {
	return p.Blockstore, nil
}

func (p *ProxyBlockstoreAccessor) Done(cid storagemarket.PayloadCID) error {
	return nil
}

// ImportsBlockstoreAccessor is a blockstore accessor backed by the
// imports.Manager.
type ImportsBlockstoreAccessor struct {
	m    *imports.Manager
	lk   sync.Mutex
	open map[cid.Cid]struct {
		st   stores.ClosableBlockstore
		refs int
	}
}

var _ storagemarket.BlockstoreAccessor = (*ImportsBlockstoreAccessor)(nil)

func NewImportsBlockstoreAccessor(importmgr *imports.Manager) *ImportsBlockstoreAccessor {
	return &ImportsBlockstoreAccessor{
		m: importmgr,
		open: make(map[cid.Cid]struct {
			st   stores.ClosableBlockstore
			refs int
		}),
	}
}

func (s *ImportsBlockstoreAccessor) Get(payloadCID storagemarket.PayloadCID) (blockstore.Blockstore, error) {
	s.lk.Lock()
	defer s.lk.Unlock()

	e, ok := s.open[payloadCID]
	if ok {
		e.refs++
		return e.st, nil
	}

	path, err := s.m.CARPathFor(payloadCID)
	if err != nil {
		return nil, xerrors.Errorf("failed to get client blockstore for root %s: %w", payloadCID, err)
	}
	if path == "" {
		return nil, xerrors.Errorf("no client blockstore for root %s", payloadCID)
	}
	ret, err := stores.ReadOnlyFilestore(path)
	if err != nil {
		return nil, err
	}
	e.st = ret
	s.open[payloadCID] = e
	return ret, nil
}

func (s *ImportsBlockstoreAccessor) Done(payloadCID storagemarket.PayloadCID) error {
	s.lk.Lock()
	defer s.lk.Unlock()

	e, ok := s.open[payloadCID]
	if !ok {
		return nil
	}

	e.refs--
	if e.refs == 0 {
		if err := e.st.Close(); err != nil {
			log.Warnf("failed to close blockstore: %s", err)
		}
		delete(s.open, payloadCID)
	}
	return nil
}
@ -1,466 +0,0 @@
package storageadapter

import (
	"context"
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/ipfs/go-cid"
	"go.uber.org/fx"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-state-types/builtin"
	"github.com/filecoin-project/go-state-types/builtin/v9/market"
	"github.com/filecoin-project/go-state-types/exitcode"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/storage/ctladdr"
)

type dealPublisherAPI interface {
	ChainHead(context.Context) (*types.TipSet, error)
	MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error)
	StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error)

	WalletBalance(context.Context, address.Address) (types.BigInt, error)
	WalletHas(context.Context, address.Address) (bool, error)
	StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error)
	StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error)
	StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error)
}

// DealPublisher batches deal publishing so that many deals can be included in
// a single publish message. This saves gas for miners that publish deals
// frequently.
// When a deal is submitted, the DealPublisher waits a configurable amount of
// time for other deals to be submitted before sending the publish message.
// There is a configurable maximum number of deals that can be included in one
// message. When the limit is reached the DealPublisher immediately submits a
// publish message with all deals in the queue.
type DealPublisher struct {
	api dealPublisherAPI
	as  *ctladdr.AddressSelector

	ctx      context.Context
	Shutdown context.CancelFunc

	maxDealsPerPublishMsg uint64
	publishPeriod         time.Duration
	publishSpec           *api.MessageSendSpec

	lk                      sync.Mutex
	pending                 []*pendingDeal
	cancelWaitForMoreDeals  context.CancelFunc
	publishPeriodStart      time.Time
	startEpochSealingBuffer abi.ChainEpoch
}

// A deal that is queued to be published
type pendingDeal struct {
	ctx    context.Context
	deal   market.ClientDealProposal
	Result chan publishResult
}

// The result of publishing a deal
type publishResult struct {
	msgCid cid.Cid
	err    error
}

func newPendingDeal(ctx context.Context, deal market.ClientDealProposal) *pendingDeal {
	return &pendingDeal{
		ctx:    ctx,
		deal:   deal,
		Result: make(chan publishResult),
	}
}

type PublishMsgConfig struct {
	// The amount of time to wait for more deals to arrive before
	// publishing
	Period time.Duration
	// The maximum number of deals to include in a single PublishStorageDeals
	// message
	MaxDealsPerMsg uint64
	// Minimum start epoch buffer to give time for sealing of sector with deal
	StartEpochSealingBuffer uint64
}
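Concretely, these three knobs trade gas savings against deal latency: a longer Period batches more deals into one PublishStorageDeals message, MaxDealsPerMsg caps the batch, and StartEpochSealingBuffer leaves room for sealing to finish before a deal's start epoch. A hedged example configuration (values are illustrative, not defaults asserted by this commit):

// Illustrative wiring: batch up to 8 deals, wait at most an hour for the
// batch to fill, and keep a 480-epoch (~4h at 30s epochs) sealing buffer.
publishMsgCfg := PublishMsgConfig{
	Period:                  time.Hour,
	MaxDealsPerMsg:          8,
	StartEpochSealingBuffer: 480,
}
_ = publishMsgCfg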
func NewDealPublisher(
|
||||
feeConfig *config.MinerFeeConfig,
|
||||
publishMsgCfg PublishMsgConfig,
|
||||
) func(lc fx.Lifecycle, full api.FullNode, as *ctladdr.AddressSelector) *DealPublisher {
|
||||
return func(lc fx.Lifecycle, full api.FullNode, as *ctladdr.AddressSelector) *DealPublisher {
|
||||
maxFee := abi.NewTokenAmount(0)
|
||||
if feeConfig != nil {
|
||||
maxFee = abi.TokenAmount(feeConfig.MaxPublishDealsFee)
|
||||
}
|
||||
publishSpec := &api.MessageSendSpec{MaxFee: maxFee}
|
||||
dp := newDealPublisher(full, as, publishMsgCfg, publishSpec)
|
||||
lc.Append(fx.Hook{
|
||||
OnStop: func(ctx context.Context) error {
|
||||
dp.Shutdown()
|
||||
return nil
|
||||
},
|
||||
})
|
||||
return dp
|
||||
}
|
||||
}
|
||||
|
||||
func newDealPublisher(
|
||||
dpapi dealPublisherAPI,
|
||||
as *ctladdr.AddressSelector,
|
||||
publishMsgCfg PublishMsgConfig,
|
||||
publishSpec *api.MessageSendSpec,
|
||||
) *DealPublisher {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
return &DealPublisher{
|
||||
api: dpapi,
|
||||
as: as,
|
||||
ctx: ctx,
|
||||
Shutdown: cancel,
|
||||
maxDealsPerPublishMsg: publishMsgCfg.MaxDealsPerMsg,
|
||||
publishPeriod: publishMsgCfg.Period,
|
||||
startEpochSealingBuffer: abi.ChainEpoch(publishMsgCfg.StartEpochSealingBuffer),
|
||||
publishSpec: publishSpec,
|
||||
}
|
||||
}
|
||||
|
||||
// PendingDeals returns the list of deals that are queued up to be published
|
||||
func (p *DealPublisher) PendingDeals() api.PendingDealInfo {
|
||||
p.lk.Lock()
|
||||
defer p.lk.Unlock()
|
||||
|
||||
// Filter out deals whose context has been cancelled
|
||||
deals := make([]*pendingDeal, 0, len(p.pending))
|
||||
for _, dl := range p.pending {
|
||||
if dl.ctx.Err() == nil {
|
||||
deals = append(deals, dl)
|
||||
}
|
||||
}
|
||||
|
||||
pending := make([]market.ClientDealProposal, len(deals))
|
||||
for i, deal := range deals {
|
||||
pending[i] = deal.deal
|
||||
}
|
||||
|
||||
return api.PendingDealInfo{
|
||||
Deals: pending,
|
||||
PublishPeriodStart: p.publishPeriodStart,
|
||||
PublishPeriod: p.publishPeriod,
|
||||
}
|
||||
}
|
||||
|
||||
// ForcePublishPendingDeals publishes all pending deals without waiting for
|
||||
// the publish period to elapse
|
||||
func (p *DealPublisher) ForcePublishPendingDeals() {
|
||||
p.lk.Lock()
|
||||
defer p.lk.Unlock()
|
||||
|
||||
log.Infof("force publishing deals")
|
||||
p.publishAllDeals()
|
||||
}
|
||||
|
||||
func (p *DealPublisher) Publish(ctx context.Context, deal market.ClientDealProposal) (cid.Cid, error) {
|
||||
pdeal := newPendingDeal(ctx, deal)
|
||||
|
||||
// Add the deal to the queue
|
||||
p.processNewDeal(pdeal)
|
||||
|
||||
// Wait for the deal to be submitted
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return cid.Undef, ctx.Err()
|
||||
case res := <-pdeal.Result:
|
||||
return res.msgCid, res.err
|
||||
}
|
||||
}
|
||||
|
||||
func (p *DealPublisher) processNewDeal(pdeal *pendingDeal) {
	p.lk.Lock()
	defer p.lk.Unlock()

	// Filter out any cancelled deals
	p.filterCancelledDeals()

	// If all deals have been cancelled, clear the wait-for-deals timer
	if len(p.pending) == 0 && p.cancelWaitForMoreDeals != nil {
		p.cancelWaitForMoreDeals()
		p.cancelWaitForMoreDeals = nil
	}

	// Make sure the new deal hasn't been cancelled
	if pdeal.ctx.Err() != nil {
		return
	}

	pdealPropCid, err := pdeal.deal.Proposal.Cid()
	if err != nil {
		log.Warnf("failed to calculate proposal CID for new pending deal with piece CID %s", pdeal.deal.Proposal.PieceCID)
		return
	}

	// Sanity check that the new deal isn't already in the queue
	for _, pd := range p.pending {
		pdPropCid, err := pd.deal.Proposal.Cid()
		if err != nil {
			log.Warnf("failed to calculate proposal CID for pending deal already in publish queue with piece CID %s", pd.deal.Proposal.PieceCID)
			return
		}

		if pdPropCid.Equals(pdealPropCid) {
			log.Warnf("tried to process new pending deal with piece CID %s that is already in publish queue; returning", pdeal.deal.Proposal.PieceCID)
			return
		}
	}

	// Add the new deal to the queue
	p.pending = append(p.pending, pdeal)
	log.Infof("add deal with piece CID %s to publish deals queue - %d deals in queue (max queue size %d)",
		pdeal.deal.Proposal.PieceCID, len(p.pending), p.maxDealsPerPublishMsg)

	// If the maximum number of deals per message has been reached, or we're
	// not batching at all, send a publish message immediately
	if uint64(len(p.pending)) >= p.maxDealsPerPublishMsg || p.publishPeriod == 0 {
		log.Infof("publish deals queue has reached max size of %d, publishing deals", p.maxDealsPerPublishMsg)
		p.publishAllDeals()
		return
	}

	// Otherwise wait for more deals to arrive or the timeout to be reached
	p.waitForMoreDeals()
}

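In practice the two flush triggers compose: with `MaxDealsPerMsg: 2` and a non-zero `Period`, three deals arriving inside the window are sent as an immediate batch of two followed by a batch of one when the timer fires; the "publish deals that exceed max deals per message" case in the test table below exercises exactly this split.
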
func (p *DealPublisher) waitForMoreDeals() {
	// Check if we're already waiting for deals
	if !p.publishPeriodStart.IsZero() {
		elapsed := build.Clock.Since(p.publishPeriodStart)
		log.Infof("%s elapsed of %s until publish deals queue is published",
			elapsed, p.publishPeriod)
		return
	}

	// Set a timeout to wait for more deals to arrive
	log.Infof("waiting publish deals queue period of %s before publishing", p.publishPeriod)
	ctx, cancel := context.WithCancel(p.ctx)

	// Create the timer _before_ taking the current time so publishPeriod+timeout
	// is always >= the actual timer timeout.
	timer := build.Clock.Timer(p.publishPeriod)

	p.publishPeriodStart = build.Clock.Now()
	p.cancelWaitForMoreDeals = cancel

	go func() {
		select {
		case <-ctx.Done():
			timer.Stop()
		case <-timer.C:
			p.lk.Lock()
			defer p.lk.Unlock()

			// The timeout has expired so publish all pending deals
			log.Infof("publish deals queue period of %s has expired, publishing deals", p.publishPeriod)
			p.publishAllDeals()
		}
	}()
}

func (p *DealPublisher) publishAllDeals() {
	// If the timeout hasn't yet been cancelled, cancel it
	if p.cancelWaitForMoreDeals != nil {
		p.cancelWaitForMoreDeals()
		p.cancelWaitForMoreDeals = nil
		p.publishPeriodStart = time.Time{}
	}

	// Filter out any deals that have been cancelled
	p.filterCancelledDeals()
	deals := p.pending
	p.pending = nil

	// Send the publish message
	go p.publishReady(deals)
}

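Note that the snapshot of pending deals is handed to `publishReady` on a fresh goroutine, so the publisher's lock is never held across deal validation or the mempool push.
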
func (p *DealPublisher) publishReady(ready []*pendingDeal) {
	if len(ready) == 0 {
		return
	}

	// onComplete is called when the publish message has been sent or there
	// was an error
	onComplete := func(pd *pendingDeal, msgCid cid.Cid, err error) {
		// Send the publish result on the pending deal's Result channel
		res := publishResult{
			msgCid: msgCid,
			err:    err,
		}
		select {
		case <-p.ctx.Done():
		case <-pd.ctx.Done():
		case pd.Result <- res:
		}
	}

	// Validate each deal to make sure it can be published
	validated := make([]*pendingDeal, 0, len(ready))
	deals := make([]market.ClientDealProposal, 0, len(ready))
	for _, pd := range ready {
		// Validate the deal
		if err := p.validateDeal(pd.deal); err != nil {
			// Validation failed, complete immediately with an error
			go onComplete(pd, cid.Undef, xerrors.Errorf("publish validation failed: %w", err))
			continue
		}

		validated = append(validated, pd)
		deals = append(deals, pd.deal)
	}

	// Send the publish message
	msgCid, err := p.publishDealProposals(deals)

	// Signal that each deal has been published
	for _, pd := range validated {
		go onComplete(pd, msgCid, err)
	}
}

// validateDeal checks that the deal proposal start epoch hasn't already
// elapsed, and simulates the publish message to make sure it would succeed
// on chain
func (p *DealPublisher) validateDeal(deal market.ClientDealProposal) error {
	start := time.Now()

	pcid, err := deal.Proposal.Cid()
	if err != nil {
		return xerrors.Errorf("computing proposal cid: %w", err)
	}

	head, err := p.api.ChainHead(p.ctx)
	if err != nil {
		return err
	}
	if head.Height()+p.startEpochSealingBuffer > deal.Proposal.StartEpoch {
		return xerrors.Errorf(
			"cannot publish deal with piece CID %s: current epoch %d has passed deal proposal start epoch %d",
			deal.Proposal.PieceCID, head.Height(), deal.Proposal.StartEpoch)
	}

	mi, err := p.api.StateMinerInfo(p.ctx, deal.Proposal.Provider, types.EmptyTSK)
	if err != nil {
		return xerrors.Errorf("getting provider info: %w", err)
	}

	params, err := actors.SerializeParams(&market.PublishStorageDealsParams{
		Deals: []market.ClientDealProposal{deal},
	})
	if err != nil {
		return xerrors.Errorf("serializing PublishStorageDeals params failed: %w", err)
	}

	addr, _, err := p.as.AddressFor(p.ctx, p.api, mi, api.DealPublishAddr, big.Zero(), big.Zero())
	if err != nil {
		return xerrors.Errorf("selecting address for publishing deals: %w", err)
	}

	res, err := p.api.StateCall(p.ctx, &types.Message{
		To:     builtin.StorageMarketActorAddr,
		From:   addr,
		Value:  types.NewInt(0),
		Method: builtin.MethodsMarket.PublishStorageDeals,
		Params: params,
	}, head.Key())
	if err != nil {
		return xerrors.Errorf("simulating deal publish message: %w", err)
	}
	if res.MsgRct.ExitCode != exitcode.Ok {
		return xerrors.Errorf("simulating deal publish message: non-zero exitcode %s; message: %s", res.MsgRct.ExitCode, res.Error)
	}

	took := time.Since(start)
	log.Infow("validating deal", "took", took, "proposal", pcid)

	return nil
}

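StateCall here dry-runs the PublishStorageDeals message against the head tipset without broadcasting it, so a deal that would abort on chain is rejected before any gas is spent. The same check works for any message; a minimal sketch, assuming `msg` is a prepared *types.Message:

	res, err := p.api.StateCall(p.ctx, msg, head.Key())
	if err != nil {
		// the simulation itself failed (e.g. API error)
	} else if res.MsgRct.ExitCode != exitcode.Ok {
		// the message would revert on chain; don't send it
	}
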
// publishDealProposals sends the publish message for a batch of deals
func (p *DealPublisher) publishDealProposals(deals []market.ClientDealProposal) (cid.Cid, error) {
	if len(deals) == 0 {
		return cid.Undef, nil
	}

	log.Infof("publishing %d deals in publish deals queue with piece CIDs: %s", len(deals), pieceCids(deals))

	provider := deals[0].Proposal.Provider
	for _, dl := range deals {
		if dl.Proposal.Provider != provider {
			msg := fmt.Sprintf("publishing %d deals failed: ", len(deals)) +
				"not all deals are for same provider: " +
				fmt.Sprintf("deal with piece CID %s is for provider %s ", deals[0].Proposal.PieceCID, deals[0].Proposal.Provider) +
				fmt.Sprintf("but deal with piece CID %s is for provider %s", dl.Proposal.PieceCID, dl.Proposal.Provider)
			return cid.Undef, xerrors.New(msg)
		}
	}

	mi, err := p.api.StateMinerInfo(p.ctx, provider, types.EmptyTSK)
	if err != nil {
		return cid.Undef, err
	}

	params, err := actors.SerializeParams(&market.PublishStorageDealsParams{
		Deals: deals,
	})
	if err != nil {
		return cid.Undef, xerrors.Errorf("serializing PublishStorageDeals params failed: %w", err)
	}

	addr, _, err := p.as.AddressFor(p.ctx, p.api, mi, api.DealPublishAddr, big.Zero(), big.Zero())
	if err != nil {
		return cid.Undef, xerrors.Errorf("selecting address for publishing deals: %w", err)
	}

	smsg, err := p.api.MpoolPushMessage(p.ctx, &types.Message{
		To:     builtin.StorageMarketActorAddr,
		From:   addr,
		Value:  types.NewInt(0),
		Method: builtin.MethodsMarket.PublishStorageDeals,
		Params: params,
	}, p.publishSpec)
	if err != nil {
		return cid.Undef, err
	}
	return smsg.Cid(), nil
}

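The single-provider restriction is deliberate: one PublishStorageDeals message is paid for and signed by one provider's deal-publish address. If a caller ever needed to batch across providers, deals would have to be grouped first; a purely hypothetical helper for illustration (not part of this file):

	// groupByProvider is an illustrative sketch; the publisher itself
	// requires every deal in a batch to share a provider.
	func groupByProvider(deals []market.ClientDealProposal) map[address.Address][]market.ClientDealProposal {
		grouped := make(map[address.Address][]market.ClientDealProposal)
		for _, dl := range deals {
			grouped[dl.Proposal.Provider] = append(grouped[dl.Proposal.Provider], dl)
		}
		return grouped
	}
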
func pieceCids(deals []market.ClientDealProposal) string {
	cids := make([]string, 0, len(deals))
	for _, dl := range deals {
		cids = append(cids, dl.Proposal.PieceCID.String())
	}
	return strings.Join(cids, ", ")
}

// filterCancelledDeals removes deals whose context has been cancelled from
// the pending queue
func (p *DealPublisher) filterCancelledDeals() {
	filtered := p.pending[:0]
	for _, pd := range p.pending {
		if pd.ctx.Err() != nil {
			continue
		}
		filtered = append(filtered, pd)
	}
	p.pending = filtered
}
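The `p.pending[:0]` slice reuses the existing backing array rather than allocating a second one; the same in-place filtering idiom in isolation:

	// In-place filtering: preserves order, avoids a new allocation.
	xs := []int{1, 2, 3, 4}
	keep := xs[:0]
	for _, x := range xs {
		if x%2 == 0 {
			keep = append(keep, x)
		}
	}
	// keep == [2 4], sharing xs's backing array
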
@ -1,423 +0,0 @@
// stm: #unit
package storageadapter

import (
	"bytes"
	"context"
	"testing"
	"time"

	"github.com/ipfs/go-cid"
	"github.com/raulk/clock"
	"github.com/stretchr/testify/require"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"
	"github.com/filecoin-project/go-state-types/crypto"
	"github.com/filecoin-project/go-state-types/exitcode"
	tutils "github.com/filecoin-project/specs-actors/v2/support/testing"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
	"github.com/filecoin-project/lotus/chain/types"
)

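The tests below swap the package clock for the mock from github.com/raulk/clock so the publish period can be crossed deterministically instead of by sleeping; the core pattern (restoring the real clock is done via t.Cleanup):

	mc := clock.NewMock()
	build.Clock = mc
	mc.Set(time.Now())                              // establish "now"
	mc.Set(start.Add(publishPeriod + 1))            // jump past the batching deadline
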
func TestDealPublisher(t *testing.T) {
	//stm: @MARKET_DEAL_PUBLISHER_PUBLISH_001, @MARKET_DEAL_PUBLISHER_GET_PENDING_DEALS_001
	oldClock := build.Clock
	t.Cleanup(func() { build.Clock = oldClock })
	mc := clock.NewMock()
	build.Clock = mc

	testCases := []struct {
		name                            string
		publishPeriod                   time.Duration
		maxDealsPerMsg                  uint64
		dealCountWithinPublishPeriod    int
		ctxCancelledWithinPublishPeriod int
		expiredDeals                    int
		dealCountAfterPublishPeriod     int
		expectedDealsPerMsg             []int
		failOne                         bool
	}{{
		name:                         "publish one deal within publish period",
		publishPeriod:                10 * time.Millisecond,
		maxDealsPerMsg:               5,
		dealCountWithinPublishPeriod: 1,
		dealCountAfterPublishPeriod:  0,
		expectedDealsPerMsg:          []int{1},
	}, {
		name:                         "publish two deals within publish period",
		publishPeriod:                10 * time.Millisecond,
		maxDealsPerMsg:               5,
		dealCountWithinPublishPeriod: 2,
		dealCountAfterPublishPeriod:  0,
		expectedDealsPerMsg:          []int{2},
	}, {
		name:                         "publish one deal within publish period, and one after",
		publishPeriod:                10 * time.Millisecond,
		maxDealsPerMsg:               5,
		dealCountWithinPublishPeriod: 1,
		dealCountAfterPublishPeriod:  1,
		expectedDealsPerMsg:          []int{1, 1},
	}, {
		name:                         "publish deals that exceed max deals per message within publish period, and one after",
		publishPeriod:                10 * time.Millisecond,
		maxDealsPerMsg:               2,
		dealCountWithinPublishPeriod: 3,
		dealCountAfterPublishPeriod:  1,
		expectedDealsPerMsg:          []int{2, 1, 1},
	}, {
		name:                            "ignore deals with cancelled context",
		publishPeriod:                   10 * time.Millisecond,
		maxDealsPerMsg:                  5,
		dealCountWithinPublishPeriod:    2,
		ctxCancelledWithinPublishPeriod: 2,
		dealCountAfterPublishPeriod:     1,
		expectedDealsPerMsg:             []int{2, 1},
	}, {
		name:                         "ignore expired deals",
		publishPeriod:                10 * time.Millisecond,
		maxDealsPerMsg:               5,
		dealCountWithinPublishPeriod: 2,
		expiredDeals:                 2,
		dealCountAfterPublishPeriod:  1,
		expectedDealsPerMsg:          []int{2, 1},
	}, {
		name:                            "zero config",
		publishPeriod:                   0,
		maxDealsPerMsg:                  0,
		dealCountWithinPublishPeriod:    2,
		ctxCancelledWithinPublishPeriod: 0,
		dealCountAfterPublishPeriod:     2,
		expectedDealsPerMsg:             []int{1, 1, 1, 1},
	}, {
		name:                         "one deal failing doesn't fail the entire batch",
		publishPeriod:                10 * time.Millisecond,
		maxDealsPerMsg:               5,
		dealCountWithinPublishPeriod: 2,
		dealCountAfterPublishPeriod:  0,
		failOne:                      true,
		expectedDealsPerMsg:          []int{1},
	}}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			mc.Set(time.Now())
			dpapi := newDPAPI(t)

			// Create a deal publisher
			dp := newDealPublisher(dpapi, nil, PublishMsgConfig{
				Period:         tc.publishPeriod,
				MaxDealsPerMsg: tc.maxDealsPerMsg,
			}, &api.MessageSendSpec{MaxFee: abi.NewTokenAmount(1)})

			// Keep a record of the deals that were submitted to be published
			var dealsToPublish []markettypes.ClientDealProposal

			// Publish deals within publish period
			for i := 0; i < tc.dealCountWithinPublishPeriod; i++ {
				if tc.failOne && i == 1 {
					publishDeal(t, dp, i, false, false)
				} else {
					deal := publishDeal(t, dp, 0, false, false)
					dealsToPublish = append(dealsToPublish, deal)
				}
			}
			for i := 0; i < tc.ctxCancelledWithinPublishPeriod; i++ {
				publishDeal(t, dp, 0, true, false)
			}
			for i := 0; i < tc.expiredDeals; i++ {
				publishDeal(t, dp, 0, false, true)
			}

			// Wait until publish period has elapsed
			if tc.publishPeriod > 0 {
				// If we expect deals to get stuck in the queue, wait until that happens
				if tc.maxDealsPerMsg != 0 && tc.dealCountWithinPublishPeriod%int(tc.maxDealsPerMsg) != 0 {
					require.Eventually(t, func() bool {
						dp.lk.Lock()
						defer dp.lk.Unlock()
						return !dp.publishPeriodStart.IsZero()
					}, time.Second, time.Millisecond, "failed to queue deals")
				}

				// Then wait to send
				require.Eventually(t, func() bool {
					dp.lk.Lock()
					defer dp.lk.Unlock()

					// Advance if necessary.
					if mc.Since(dp.publishPeriodStart) <= tc.publishPeriod {
						dp.lk.Unlock()
						mc.Set(dp.publishPeriodStart.Add(tc.publishPeriod + 1))
						dp.lk.Lock()
					}

					return len(dp.pending) == 0
				}, time.Second, time.Millisecond, "failed to send pending messages")
			}

			// Publish deals after publish period
			for i := 0; i < tc.dealCountAfterPublishPeriod; i++ {
				deal := publishDeal(t, dp, 0, false, false)
				dealsToPublish = append(dealsToPublish, deal)
			}

			if tc.publishPeriod > 0 && tc.dealCountAfterPublishPeriod > 0 {
				require.Eventually(t, func() bool {
					dp.lk.Lock()
					defer dp.lk.Unlock()
					if mc.Since(dp.publishPeriodStart) <= tc.publishPeriod {
						dp.lk.Unlock()
						mc.Set(dp.publishPeriodStart.Add(tc.publishPeriod + 1))
						dp.lk.Lock()
					}
					return len(dp.pending) == 0
				}, time.Second, time.Millisecond, "failed to send pending messages")
			}

			checkPublishedDeals(t, dpapi, dealsToPublish, tc.expectedDealsPerMsg)
		})
	}
}

func TestForcePublish(t *testing.T) {
	//stm: @MARKET_DEAL_PUBLISHER_PUBLISH_001, @MARKET_DEAL_PUBLISHER_GET_PENDING_DEALS_001
	//stm: @MARKET_DEAL_PUBLISHER_FORCE_PUBLISH_ALL_001
	dpapi := newDPAPI(t)

	// Create a deal publisher
	start := build.Clock.Now()
	publishPeriod := time.Hour
	dp := newDealPublisher(dpapi, nil, PublishMsgConfig{
		Period:         publishPeriod,
		MaxDealsPerMsg: 10,
	}, &api.MessageSendSpec{MaxFee: abi.NewTokenAmount(1)})

	// Queue three deals for publishing, one with a cancelled context
	var dealsToPublish []markettypes.ClientDealProposal
	// 1. Regular deal
	deal := publishDeal(t, dp, 0, false, false)
	dealsToPublish = append(dealsToPublish, deal)
	// 2. Deal with cancelled context
	publishDeal(t, dp, 0, true, false)
	// 3. Regular deal
	deal = publishDeal(t, dp, 0, false, false)
	dealsToPublish = append(dealsToPublish, deal)

	// Allow a moment for them to be queued
	build.Clock.Sleep(10 * time.Millisecond)

	// Should be two deals in the pending deals list
	// (deal with cancelled context is ignored)
	pendingInfo := dp.PendingDeals()
	require.Len(t, pendingInfo.Deals, 2)
	require.Equal(t, publishPeriod, pendingInfo.PublishPeriod)
	require.True(t, pendingInfo.PublishPeriodStart.After(start))
	require.True(t, pendingInfo.PublishPeriodStart.Before(build.Clock.Now()))

	// Force publish all pending deals
	dp.ForcePublishPendingDeals()

	// Should be no pending deals
	pendingInfo = dp.PendingDeals()
	require.Len(t, pendingInfo.Deals, 0)

	// Make sure the expected deals were published
	checkPublishedDeals(t, dpapi, dealsToPublish, []int{2})
}

func publishDeal(t *testing.T, dp *DealPublisher, invalid int, ctxCancelled bool, expired bool) markettypes.ClientDealProposal {
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)

	pctx := ctx
	if ctxCancelled {
		pctx, cancel = context.WithCancel(ctx)
		cancel()
	}

	startEpoch := abi.ChainEpoch(20)
	if expired {
		startEpoch = abi.ChainEpoch(5)
	}
	deal := markettypes.ClientDealProposal{
		Proposal: markettypes.DealProposal{
			PieceCID:   generateCids(1)[0],
			Client:     getClientActor(t),
			Provider:   getProviderActor(t),
			StartEpoch: startEpoch,
			EndEpoch:   abi.ChainEpoch(120),
			PieceSize:  abi.PaddedPieceSize(invalid), // pass invalid into StateCall below
		},
		ClientSignature: crypto.Signature{
			Type: crypto.SigTypeSecp256k1,
			Data: []byte("signature data"),
		},
	}

	go func() {
		_, err := dp.Publish(pctx, deal)

		// If the test has completed just bail out without checking for errors
		if ctx.Err() != nil {
			return
		}

		if ctxCancelled || expired || invalid == 1 {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
		}
	}()

	return deal
}

func checkPublishedDeals(t *testing.T, dpapi *dpAPI, dealsToPublish []markettypes.ClientDealProposal, expectedDealsPerMsg []int) {
	// For each message that was expected to be sent
	var publishedDeals []markettypes.ClientDealProposal
	for _, expectedDealsInMsg := range expectedDealsPerMsg {
		// Should have called StateMinerInfo with the provider address
		stateMinerInfoAddr := <-dpapi.stateMinerInfoCalls
		require.Equal(t, getProviderActor(t), stateMinerInfoAddr)

		// Check the fields of the message that was sent
		msg := <-dpapi.pushedMsgs
		require.Equal(t, getWorkerActor(t), msg.From)
		require.Equal(t, market.Address, msg.To)
		require.Equal(t, market.Methods.PublishStorageDeals, msg.Method)

		// Check that the expected number of deals was included in the message
		var params markettypes.PublishStorageDealsParams
		err := params.UnmarshalCBOR(bytes.NewReader(msg.Params))
		require.NoError(t, err)
		require.Len(t, params.Deals, expectedDealsInMsg)

		// Keep track of the deals that were sent
		publishedDeals = append(publishedDeals, params.Deals...)
	}

	// Verify that all deals that were submitted to be published were
	// sent out (we do this by ensuring all the piece CIDs are present)
	require.True(t, matchPieceCids(publishedDeals, dealsToPublish))
}

func matchPieceCids(sent []markettypes.ClientDealProposal, exp []markettypes.ClientDealProposal) bool {
	cidsA := dealPieceCids(sent)
	cidsB := dealPieceCids(exp)

	if len(cidsA) != len(cidsB) {
		return false
	}

	s1 := cid.NewSet()
	for _, c := range cidsA {
		s1.Add(c)
	}

	for _, c := range cidsB {
		if !s1.Has(c) {
			return false
		}
	}

	return true
}

func dealPieceCids(deals []markettypes.ClientDealProposal) []cid.Cid {
	cids := make([]cid.Cid, 0, len(deals))
	for _, dl := range deals {
		cids = append(cids, dl.Proposal.PieceCID)
	}
	return cids
}

type dpAPI struct {
	t      *testing.T
	worker address.Address

	stateMinerInfoCalls chan address.Address
	pushedMsgs          chan *types.Message
}

func newDPAPI(t *testing.T) *dpAPI {
	return &dpAPI{
		t:                   t,
		worker:              getWorkerActor(t),
		stateMinerInfoCalls: make(chan address.Address, 128),
		pushedMsgs:          make(chan *types.Message, 128),
	}
}

func (d *dpAPI) ChainHead(ctx context.Context) (*types.TipSet, error) {
	dummyCid, err := cid.Parse("bafkqaaa")
	require.NoError(d.t, err)
	return types.NewTipSet([]*types.BlockHeader{{
		Miner:                 tutils.NewActorAddr(d.t, "miner"),
		Height:                abi.ChainEpoch(10),
		ParentStateRoot:       dummyCid,
		Messages:              dummyCid,
		ParentMessageReceipts: dummyCid,
		BlockSig:              &crypto.Signature{Type: crypto.SigTypeBLS},
		BLSAggregate:          &crypto.Signature{Type: crypto.SigTypeBLS},
	}})
}

func (d *dpAPI) StateMinerInfo(ctx context.Context, address address.Address, key types.TipSetKey) (api.MinerInfo, error) {
	d.stateMinerInfoCalls <- address
	return api.MinerInfo{Worker: d.worker}, nil
}

func (d *dpAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
	d.pushedMsgs <- msg
	return &types.SignedMessage{Message: *msg}, nil
}

func (d *dpAPI) WalletBalance(ctx context.Context, a address.Address) (types.BigInt, error) {
	panic("don't call me")
}

func (d *dpAPI) WalletHas(ctx context.Context, a address.Address) (bool, error) {
	panic("don't call me")
}

func (d *dpAPI) StateAccountKey(ctx context.Context, a address.Address, key types.TipSetKey) (address.Address, error) {
	panic("don't call me")
}

func (d *dpAPI) StateLookupID(ctx context.Context, a address.Address, key types.TipSetKey) (address.Address, error) {
	panic("don't call me")
}

func (d *dpAPI) StateCall(ctx context.Context, message *types.Message, key types.TipSetKey) (*api.InvocResult, error) {
	var p markettypes.PublishStorageDealsParams
	if err := p.UnmarshalCBOR(bytes.NewReader(message.Params)); err != nil {
		return nil, xerrors.Errorf("unmarshal market params: %w", err)
	}

	exit := exitcode.Ok
	if p.Deals[0].Proposal.PieceSize == 1 {
		exit = exitcode.ErrIllegalState
	}
	return &api.InvocResult{MsgRct: &types.MessageReceipt{ExitCode: exit}}, nil
}

func getClientActor(t *testing.T) address.Address {
	return tutils.NewActorAddr(t, "client")
}

func getWorkerActor(t *testing.T) address.Address {
	return tutils.NewActorAddr(t, "worker")
}

func getProviderActor(t *testing.T) address.Address {
	return tutils.NewActorAddr(t, "provider")
}
@ -1,85 +0,0 @@
package storageadapter

import (
	"context"
	"sync"

	"github.com/filecoin-project/go-state-types/abi"

	actorsmarket "github.com/filecoin-project/lotus/chain/actors/builtin/market"
	"github.com/filecoin-project/lotus/chain/events"
	"github.com/filecoin-project/lotus/chain/events/state"
	"github.com/filecoin-project/lotus/chain/types"
)

// dealStateMatcher caches the DealStates for the most recent
// old/new tipset combination
type dealStateMatcher struct {
	preds *state.StatePredicates

	lk               sync.Mutex
	oldTsk           types.TipSetKey
	newTsk           types.TipSetKey
	oldDealStateRoot actorsmarket.DealStates
	newDealStateRoot actorsmarket.DealStates
}

func newDealStateMatcher(preds *state.StatePredicates) *dealStateMatcher {
	return &dealStateMatcher{preds: preds}
}

// matcher returns a function that checks if the state of the given dealID
// has changed.
// It caches the DealStates for the most recent old/new tipset combination.
func (mc *dealStateMatcher) matcher(ctx context.Context, dealID abi.DealID) events.StateMatchFunc {
	// The function that is called to check if the deal state has changed for
	// the target deal ID
	dealStateChangedForID := mc.preds.DealStateChangedForIDs([]abi.DealID{dealID})

	// The match function is called by the events API to check if there's
	// been a state change for the deal with the target deal ID
	match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) {
		mc.lk.Lock()
		defer mc.lk.Unlock()

		// Check if we've already fetched the DealStates for the given tipsets
		if mc.oldTsk == oldTs.Key() && mc.newTsk == newTs.Key() {
			// If we fetch the DealStates and there is no difference between
			// them, they are stored as nil. So we can just bail out.
			if mc.oldDealStateRoot == nil || mc.newDealStateRoot == nil {
				return false, nil, nil
			}

			// Check if the deal state has changed for the target ID
			return dealStateChangedForID(ctx, mc.oldDealStateRoot, mc.newDealStateRoot)
		}

		// We haven't already fetched the DealStates for the given tipsets, so
		// do so now

		// Replace dealStateChangedForID with a function that records the
		// DealStates so that we can cache them
		var oldDealStateRootSaved, newDealStateRootSaved actorsmarket.DealStates
		recorder := func(ctx context.Context, oldDealStateRoot, newDealStateRoot actorsmarket.DealStates) (changed bool, user state.UserData, err error) {
			// Record DealStates
			oldDealStateRootSaved = oldDealStateRoot
			newDealStateRootSaved = newDealStateRoot

			return dealStateChangedForID(ctx, oldDealStateRoot, newDealStateRoot)
		}

		// Call the match function
		dealDiff := mc.preds.OnStorageMarketActorChanged(
			mc.preds.OnDealStateChanged(recorder))
		matched, data, err := dealDiff(ctx, oldTs.Key(), newTs.Key())

		// Save the recorded DealStates for the tipsets
		mc.oldTsk = oldTs.Key()
		mc.newTsk = newTs.Key()
		mc.oldDealStateRoot = oldDealStateRootSaved
		mc.newDealStateRoot = newDealStateRootSaved

		return matched, data, err
	}
	return match
}
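A sketch of how a matcher like this is typically consumed (the head-change plumbing of the events API is assumed, not shown in this file; the test below drives `match` directly):

	dsm := newDealStateMatcher(state.NewStatePredicates(api))
	match := dsm.matcher(ctx, abi.DealID(1))
	// the events API calls match(oldTs, newTs) on each head change;
	// a (true, stateChange, nil) result fires the subscriber's handler
	changed, stateChange, err := match(oldTs, newTs)
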
@ -1,155 +0,0 @@
// stm: #unit
package storageadapter

import (
	"context"
	"testing"

	"github.com/ipfs/go-cid"
	cbornode "github.com/ipfs/go-ipld-cbor"
	"github.com/stretchr/testify/require"
	"golang.org/x/sync/errgroup"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
	market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
	adt2 "github.com/filecoin-project/specs-actors/v2/actors/util/adt"

	bstore "github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/events"
	"github.com/filecoin-project/lotus/chain/events/state"
	test "github.com/filecoin-project/lotus/chain/events/state/mock"
	"github.com/filecoin-project/lotus/chain/types"
)

func TestDealStateMatcher(t *testing.T) {
	//stm: @CHAIN_STATE_GET_ACTOR_001
	ctx := context.Background()
	bs := bstore.NewMemorySync()
	store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs))

	deal1 := &market2.DealState{
		SectorStartEpoch: 1,
		LastUpdatedEpoch: 2,
	}
	deal2 := &market2.DealState{
		SectorStartEpoch: 4,
		LastUpdatedEpoch: 5,
	}
	deal3 := &market2.DealState{
		SectorStartEpoch: 7,
		LastUpdatedEpoch: 8,
	}
	deals1 := map[abi.DealID]*market2.DealState{
		abi.DealID(1): deal1,
	}
	deals2 := map[abi.DealID]*market2.DealState{
		abi.DealID(1): deal2,
	}
	deals3 := map[abi.DealID]*market2.DealState{
		abi.DealID(1): deal3,
	}

	deal1StateC := createMarketState(ctx, t, store, deals1)
	deal2StateC := createMarketState(ctx, t, store, deals2)
	deal3StateC := createMarketState(ctx, t, store, deals3)

	minerAddr, err := address.NewFromString("t00")
	require.NoError(t, err)
	ts1, err := test.MockTipset(minerAddr, 1)
	require.NoError(t, err)
	ts2, err := test.MockTipset(minerAddr, 2)
	require.NoError(t, err)
	ts3, err := test.MockTipset(minerAddr, 3)
	require.NoError(t, err)

	api := test.NewMockAPI(bs)
	api.SetActor(ts1.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: deal1StateC})
	api.SetActor(ts2.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: deal2StateC})
	api.SetActor(ts3.Key(), &types.Actor{Code: builtin2.StorageMarketActorCodeID, Head: deal3StateC})

	t.Run("caching", func(t *testing.T) {
		dsm := newDealStateMatcher(state.NewStatePredicates(api))
		matcher := dsm.matcher(ctx, abi.DealID(1))

		// Call matcher with tipsets that have the same state
		ok, stateChange, err := matcher(ts1, ts1)
		require.NoError(t, err)
		require.False(t, ok)
		require.Nil(t, stateChange)
		// Should call StateGetActor once for each tipset
		require.Equal(t, 2, api.StateGetActorCallCount())

		// Call matcher with tipsets that have different state
		api.ResetCallCounts()
		ok, stateChange, err = matcher(ts1, ts2)
		require.NoError(t, err)
		require.True(t, ok)
		require.NotNil(t, stateChange)
		// Should call StateGetActor once for each tipset
		require.Equal(t, 2, api.StateGetActorCallCount())

		// Call matcher again with the same tipsets as above, should be cached
		api.ResetCallCounts()
		ok, stateChange, err = matcher(ts1, ts2)
		require.NoError(t, err)
		require.True(t, ok)
		require.NotNil(t, stateChange)
		// Should not call StateGetActor (because it should hit the cache)
		require.Equal(t, 0, api.StateGetActorCallCount())

		// Call matcher with different tipsets, should not be cached
		api.ResetCallCounts()
		ok, stateChange, err = matcher(ts2, ts3)
		require.NoError(t, err)
		require.True(t, ok)
		require.NotNil(t, stateChange)
		// Should call StateGetActor once for each tipset
		require.Equal(t, 2, api.StateGetActorCallCount())
	})

	t.Run("parallel", func(t *testing.T) {
		api.ResetCallCounts()
		dsm := newDealStateMatcher(state.NewStatePredicates(api))
		matcher := dsm.matcher(ctx, abi.DealID(1))

		// Call matcher with lots of go-routines in parallel
		var eg errgroup.Group
		res := make([]struct {
			ok          bool
			stateChange events.StateChange
		}, 20)
		for i := 0; i < len(res); i++ {
			i := i
			eg.Go(func() error {
				ok, stateChange, err := matcher(ts1, ts2)
				res[i].ok = ok
				res[i].stateChange = stateChange
				return err
			})
		}
		err := eg.Wait()
		require.NoError(t, err)

		// All go-routines should have got the same (cached) result
		for i := 1; i < len(res); i++ {
			require.Equal(t, res[i].ok, res[i-1].ok)
			require.Equal(t, res[i].stateChange, res[i-1].stateChange)
		}

		// Only one go-routine should have called StateGetActor
		// (once for each tipset)
		require.Equal(t, 2, api.StateGetActorCallCount())
	})
}

func createMarketState(ctx context.Context, t *testing.T, store adt2.Store, deals map[abi.DealID]*market2.DealState) cid.Cid {
	dealRootCid := test.CreateDealAMT(ctx, t, store, deals)
	state := test.CreateEmptyMarketState(t, store)
	state.States = dealRootCid

	stateC, err := store.Put(ctx, state)
	require.NoError(t, err)
	return stateC
}
@ -1,418 +0,0 @@
package storageadapter

import (
	"bytes"
	"context"
	"sync"

	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-bitfield"
	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/builtin"
	miner2 "github.com/filecoin-project/go-state-types/builtin/v11/miner"
	"github.com/filecoin-project/go-state-types/builtin/v8/miner"
	"github.com/filecoin-project/go-state-types/builtin/v9/market"

	"github.com/filecoin-project/lotus/build"
	lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/events"
	"github.com/filecoin-project/lotus/chain/types"
	pipeline "github.com/filecoin-project/lotus/storage/pipeline"
)

type eventsCalledAPI interface {
	Called(ctx context.Context, check events.CheckFunc, msgHnd events.MsgHandler, rev events.RevertHandler, confidence int, timeout abi.ChainEpoch, mf events.MsgMatchFunc) error
}

type dealInfoAPI interface {
	GetCurrentDealInfo(ctx context.Context, tsk types.TipSetKey, proposal *market.DealProposal, publishCid cid.Cid) (pipeline.CurrentDealInfo, error)
}

type diffPreCommitsAPI interface {
	diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*lminer.PreCommitChanges, error)
}

type SectorCommittedManager struct {
	ev       eventsCalledAPI
	dealInfo dealInfoAPI
	dpc      diffPreCommitsAPI
}

func NewSectorCommittedManager(ev eventsCalledAPI, tskAPI pipeline.CurrentDealInfoAPI, dpcAPI diffPreCommitsAPI) *SectorCommittedManager {
	dim := &pipeline.CurrentDealInfoManager{
		CDAPI: tskAPI,
	}
	return newSectorCommittedManager(ev, dim, dpcAPI)
}

func newSectorCommittedManager(ev eventsCalledAPI, dealInfo dealInfoAPI, dpcAPI diffPreCommitsAPI) *SectorCommittedManager {
	return &SectorCommittedManager{
		ev:       ev,
		dealInfo: dealInfo,
		dpc:      dpcAPI,
	}
}

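A brief usage sketch, assuming `ev`, `tskAPI` and `dpcAPI` are concrete values satisfying the interfaces above (the tests below substitute fakes for all three):

	mgr := NewSectorCommittedManager(ev, tskAPI, dpcAPI)
	err := mgr.OnDealSectorPreCommitted(ctx, provider, proposal, publishCid,
		func(sectorNumber abi.SectorNumber, isActive bool, err error) {
			// invoked exactly once: with the sector the deal landed in,
			// with isActive==true if the deal was already active, or with err
		})
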
func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, proposal market.DealProposal, publishCid cid.Cid, callback storagemarket.DealSectorPreCommittedCallback) error {
	// Ensure callback is only called once
	var once sync.Once
	cb := func(sectorNumber abi.SectorNumber, isActive bool, err error) {
		once.Do(func() {
			callback(sectorNumber, isActive, err)
		})
	}

	// First check if the deal is already active, and if so, bail out
	checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) {
		dealInfo, isActive, err := mgr.checkIfDealAlreadyActive(ctx, ts, &proposal, publishCid)
		if err != nil {
			// Note: the error returned from here will end up being returned
			// from OnDealSectorPreCommitted so no need to call the callback
			// with the error
			return false, false, xerrors.Errorf("failed to check deal activity: %w", err)
		}

		if isActive {
			// Deal is already active, bail out
			cb(0, true, nil)
			return true, false, nil
		}

		// Check that precommits which landed between when the deal was published
		// and now don't already contain the deal we care about.
		// (this can happen when the precommit lands very quickly (in tests), or
		// when the client node was down between when the deal was published and
		// when the precommit containing it landed on chain)

		diff, err := mgr.dpc.diffPreCommits(ctx, provider, dealInfo.PublishMsgTipSet, ts.Key())
		if err != nil {
			return false, false, xerrors.Errorf("failed to diff precommits: %w", err)
		}

		for _, info := range diff.Added {
			for _, d := range info.Info.DealIDs {
				if d == dealInfo.DealID {
					cb(info.Info.SectorNumber, false, nil)
					return true, false, nil
				}
			}
		}

		// Not yet active, start matching against incoming messages
		return false, true, nil
	}

	// Watch for a pre-commit message to the provider.
	matchEvent := func(msg *types.Message) (bool, error) {
		matched := msg.To == provider && (msg.Method == builtin.MethodsMiner.PreCommitSector ||
			msg.Method == builtin.MethodsMiner.PreCommitSectorBatch ||
			msg.Method == builtin.MethodsMiner.PreCommitSectorBatch2 ||
			msg.Method == builtin.MethodsMiner.ProveReplicaUpdates)
		return matched, nil
	}

	// The deal must be accepted by the deal proposal start epoch, so timeout
	// if the chain reaches that epoch
	timeoutEpoch := proposal.StartEpoch + 1

	// Check if the message params included the deal ID we're looking for.
	called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) {
		defer func() {
			if err != nil {
				cb(0, false, xerrors.Errorf("handling applied event: %w", err))
			}
		}()

		// If the deal hasn't been activated by the proposed start epoch, the
		// deal will timeout (when msg == nil it means the timeout epoch was reached)
		if msg == nil {
			err = xerrors.Errorf("deal with piece CID %s was not activated by proposed deal start epoch %d", proposal.PieceCID, proposal.StartEpoch)
			return false, err
		}

		// Ignore the pre-commit message if it was not executed successfully
		if rec.ExitCode != 0 {
			return true, nil
		}

		// When there is a reorg, the deal ID may change, so get the
		// current deal ID from the publish message CID
		res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key(), &proposal, publishCid)
		if err != nil {
			return false, xerrors.Errorf("failed to get dealinfo: %w", err)
		}

		// If this is a replica update method that succeeded, the deal is active
		if msg.Method == builtin.MethodsMiner.ProveReplicaUpdates {
			sn, err := dealSectorInReplicaUpdateSuccess(msg, rec, res)
			if err != nil {
				return false, err
			}
			if sn != nil {
				cb(*sn, true, nil)
				return false, nil
			}
			// Didn't find the deal ID in this message, so keep looking
			return true, nil
		}

		// Extract the message parameters
		sn, err := dealSectorInPreCommitMsg(msg, res)
		if err != nil {
			return false, xerrors.Errorf("failed to extract message params: %w", err)
		}

		if sn != nil {
			cb(*sn, false, nil)
		}

		// Didn't find the deal ID in this message, so keep looking
		return true, nil
	}

	revert := func(ctx context.Context, ts *types.TipSet) error {
		log.Warn("deal pre-commit reverted; TODO: actually handle this!")
		// TODO: Just go back to DealSealing?
		return nil
	}

	if err := mgr.ev.Called(ctx, checkFunc, called, revert, int(build.MessageConfidence+1), timeoutEpoch, matchEvent); err != nil {
		return xerrors.Errorf("failed to set up called handler: %w", err)
	}

	return nil
}

func (mgr *SectorCommittedManager) OnDealSectorCommitted(ctx context.Context, provider address.Address, sectorNumber abi.SectorNumber, proposal market.DealProposal, publishCid cid.Cid, callback storagemarket.DealSectorCommittedCallback) error {
	// Ensure callback is only called once
	var once sync.Once
	cb := func(err error) {
		once.Do(func() {
			callback(err)
		})
	}

	// First check if the deal is already active, and if so, bail out
	checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) {
		_, isActive, err := mgr.checkIfDealAlreadyActive(ctx, ts, &proposal, publishCid)
		if err != nil {
			// Note: the error returned from here will end up being returned
			// from OnDealSectorCommitted so no need to call the callback
			// with the error
			return false, false, err
		}

		if isActive {
			// Deal is already active, bail out
			cb(nil)
			return true, false, nil
		}

		// Not yet active, start matching against incoming messages
		return false, true, nil
	}

	// Match a prove-commit sent to the provider with the given sector number
	matchEvent := func(msg *types.Message) (matched bool, err error) {
		if msg.To != provider {
			return false, nil
		}

		return sectorInCommitMsg(msg, sectorNumber)
	}

	// The deal must be accepted by the deal proposal start epoch, so timeout
	// if the chain reaches that epoch
	timeoutEpoch := proposal.StartEpoch + 1

	called := func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) {
		defer func() {
			if err != nil {
				cb(xerrors.Errorf("handling applied event: %w", err))
			}
		}()

		// If the deal hasn't been activated by the proposed start epoch, the
		// deal will timeout (when msg == nil it means the timeout epoch was reached)
		if msg == nil {
			err := xerrors.Errorf("deal with piece CID %s was not activated by proposed deal start epoch %d", proposal.PieceCID, proposal.StartEpoch)
			return false, err
		}

		// Ignore the prove-commit message if it was not executed successfully
		if rec.ExitCode != 0 {
			return true, nil
		}

		// Get the deal info
		res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key(), &proposal, publishCid)
		if err != nil {
			return false, xerrors.Errorf("failed to look up deal on chain: %w", err)
		}

		// Make sure the deal is active
		if res.MarketDeal.State.SectorStartEpoch < 1 {
			return false, xerrors.Errorf("deal wasn't active: deal=%d, parentState=%s, h=%d", res.DealID, ts.ParentState(), ts.Height())
		}

		log.Infof("Storage deal %d activated at epoch %d", res.DealID, res.MarketDeal.State.SectorStartEpoch)

		cb(nil)

		return false, nil
	}

	revert := func(ctx context.Context, ts *types.TipSet) error {
		log.Warn("deal activation reverted; TODO: actually handle this!")
		// TODO: Just go back to DealSealing?
		return nil
	}

	if err := mgr.ev.Called(ctx, checkFunc, called, revert, int(build.MessageConfidence+1), timeoutEpoch, matchEvent); err != nil {
		return xerrors.Errorf("failed to set up called handler: %w", err)
	}

	return nil
}

func dealSectorInReplicaUpdateSuccess(msg *types.Message, rec *types.MessageReceipt, res pipeline.CurrentDealInfo) (*abi.SectorNumber, error) {
	var params miner.ProveReplicaUpdatesParams
	if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
		return nil, xerrors.Errorf("unmarshal prove replica update: %w", err)
	}

	var seekUpdate miner.ReplicaUpdate
	var found bool
	for _, update := range params.Updates {
		for _, did := range update.Deals {
			if did == res.DealID {
				seekUpdate = update
				found = true
				break
			}
		}
	}
	if !found {
		return nil, nil
	}

	// check that this update passed validation steps
	var successBf bitfield.BitField
	if err := successBf.UnmarshalCBOR(bytes.NewReader(rec.Return)); err != nil {
		return nil, xerrors.Errorf("unmarshal return value: %w", err)
	}
	success, err := successBf.IsSet(uint64(seekUpdate.SectorID))
	if err != nil {
		return nil, xerrors.Errorf("failed to check success of replica update: %w", err)
	}
	if !success {
		return nil, xerrors.Errorf("replica update %d failed", seekUpdate.SectorID)
	}
	return &seekUpdate.SectorID, nil
}

// dealSectorInPreCommitMsg tries to find a sector containing the specified deal
func dealSectorInPreCommitMsg(msg *types.Message, res pipeline.CurrentDealInfo) (*abi.SectorNumber, error) {
	switch msg.Method {
	case builtin.MethodsMiner.PreCommitSector:
		var params miner.SectorPreCommitInfo
		if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
			return nil, xerrors.Errorf("unmarshal pre commit: %w", err)
		}

		// Check through the deal IDs associated with this message
		for _, did := range params.DealIDs {
			if did == res.DealID {
				// Found the deal ID in this message. Callback with the sector ID.
				return &params.SectorNumber, nil
			}
		}
	case builtin.MethodsMiner.PreCommitSectorBatch:
		var params miner.PreCommitSectorBatchParams
		if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
			return nil, xerrors.Errorf("unmarshal pre commit: %w", err)
		}

		for _, precommit := range params.Sectors {
			// Check through the deal IDs associated with this message
			for _, did := range precommit.DealIDs {
				if did == res.DealID {
					// Found the deal ID in this message. Callback with the sector ID.
					return &precommit.SectorNumber, nil
				}
			}
		}
	case builtin.MethodsMiner.PreCommitSectorBatch2:
		var params miner2.PreCommitSectorBatchParams2
		if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
			return nil, xerrors.Errorf("unmarshal pre commit: %w", err)
		}

		for _, precommit := range params.Sectors {
			// Check through the deal IDs associated with this message
			for _, did := range precommit.DealIDs {
				if did == res.DealID {
					// Found the deal ID in this message. Callback with the sector ID.
					return &precommit.SectorNumber, nil
				}
			}
		}
	default:
		return nil, xerrors.Errorf("unexpected method %d", msg.Method)
	}

	return nil, nil
}

// sectorInCommitMsg checks if the provided message commits the specified sector
func sectorInCommitMsg(msg *types.Message, sectorNumber abi.SectorNumber) (bool, error) {
	switch msg.Method {
	case builtin.MethodsMiner.ProveCommitSector:
		var params miner.ProveCommitSectorParams
		if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
			return false, xerrors.Errorf("failed to unmarshal prove commit sector params: %w", err)
		}

		return params.SectorNumber == sectorNumber, nil

	case builtin.MethodsMiner.ProveCommitAggregate:
		var params miner.ProveCommitAggregateParams
		if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil {
			return false, xerrors.Errorf("failed to unmarshal prove commit aggregate params: %w", err)
		}

		set, err := params.SectorNumbers.IsSet(uint64(sectorNumber))
		if err != nil {
			return false, xerrors.Errorf("checking if sectorNumber is set in commit aggregate message: %w", err)
		}

		return set, nil

	default:
		return false, nil
	}
}

func (mgr *SectorCommittedManager) checkIfDealAlreadyActive(ctx context.Context, ts *types.TipSet, proposal *market.DealProposal, publishCid cid.Cid) (pipeline.CurrentDealInfo, bool, error) {
	res, err := mgr.dealInfo.GetCurrentDealInfo(ctx, ts.Key(), proposal, publishCid)
	if err != nil {
		// TODO: This may be fine for some errors
		return res, false, xerrors.Errorf("failed to look up deal on chain: %w", err)
	}

	// Sector was slashed
	if res.MarketDeal.State.SlashEpoch > 0 {
		return res, false, xerrors.Errorf("deal %d was slashed at epoch %d", res.DealID, res.MarketDeal.State.SlashEpoch)
	}

	// Sector with deal is already active
	if res.MarketDeal.State.SectorStartEpoch > 0 {
		return res, true, nil
	}

	return res, false, nil
}
@ -1,583 +0,0 @@
// stm: #unit
package storageadapter

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"math/rand"
	"testing"
	"time"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	"github.com/stretchr/testify/require"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/builtin"
	markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"
	minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner"
	"github.com/filecoin-project/go-state-types/cbor"
	tutils "github.com/filecoin-project/specs-actors/v2/support/testing"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/events"
	test "github.com/filecoin-project/lotus/chain/events/state/mock"
	"github.com/filecoin-project/lotus/chain/types"
	pipeline "github.com/filecoin-project/lotus/storage/pipeline"
)

func TestOnDealSectorPreCommitted(t *testing.T) {
|
||||
label, err := markettypes.NewLabelFromString("success")
|
||||
require.NoError(t, err)
|
||||
|
||||
provider := address.TestAddress
|
||||
ctx := context.Background()
|
||||
publishCid := generateCids(1)[0]
|
||||
sealedCid := generateCids(1)[0]
|
||||
pieceCid := generateCids(1)[0]
|
||||
dealID := abi.DealID(rand.Uint64())
|
||||
sectorNumber := abi.SectorNumber(rand.Uint64())
|
||||
proposal := market.DealProposal{
|
||||
PieceCID: pieceCid,
|
||||
PieceSize: abi.PaddedPieceSize(rand.Uint64()),
|
||||
Client: tutils.NewActorAddr(t, "client"),
|
||||
Provider: tutils.NewActorAddr(t, "provider"),
|
||||
StoragePricePerEpoch: abi.NewTokenAmount(1),
|
||||
ProviderCollateral: abi.NewTokenAmount(1),
|
||||
ClientCollateral: abi.NewTokenAmount(1),
|
||||
Label: label,
|
||||
}
|
||||
unfinishedDeal := &api.MarketDeal{
|
||||
Proposal: proposal,
|
||||
State: api.MarketDealState{
|
||||
SectorStartEpoch: -1,
|
||||
LastUpdatedEpoch: 2,
|
||||
},
|
||||
}
|
||||
activeDeal := &api.MarketDeal{
|
||||
Proposal: proposal,
|
||||
State: api.MarketDealState{
|
||||
SectorStartEpoch: 1,
|
||||
LastUpdatedEpoch: 2,
|
||||
},
|
||||
}
|
||||
slashedDeal := &api.MarketDeal{
|
||||
Proposal: proposal,
|
||||
State: api.MarketDealState{
|
||||
SectorStartEpoch: 1,
|
||||
LastUpdatedEpoch: 2,
|
||||
SlashEpoch: 2,
|
||||
},
|
||||
}
|
||||
type testCase struct {
|
||||
currentDealInfo pipeline.CurrentDealInfo
|
||||
currentDealInfoErr error
|
||||
currentDealInfoErr2 error
|
||||
preCommitDiff *miner.PreCommitChanges
|
||||
matchStates []matchState
|
||||
dealStartEpochTimeout bool
|
||||
expectedCBCallCount uint64
|
||||
expectedCBSectorNumber abi.SectorNumber
|
||||
expectedCBIsActive bool
|
||||
expectedCBError error
|
||||
expectedError error
|
||||
}
|
||||
testCases := map[string]testCase{
|
||||
"normal sequence": {
|
||||
currentDealInfo: pipeline.CurrentDealInfo{
|
||||
DealID: dealID,
|
||||
MarketDeal: unfinishedDeal,
|
||||
},
|
||||
matchStates: []matchState{
{
msg: makeMessage(t, provider, builtin.MethodsMiner.PreCommitSector, &minertypes.PreCommitSectorParams{
SectorNumber: sectorNumber,
SealedCID: sealedCid,
DealIDs: []abi.DealID{dealID},
}),
},
},
expectedCBCallCount: 1,
expectedCBIsActive: false,
expectedCBSectorNumber: sectorNumber,
},
"ignores unsuccessful pre-commit message": {
currentDealInfo: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: unfinishedDeal,
},
matchStates: []matchState{
{
msg: makeMessage(t, provider, builtin.MethodsMiner.PreCommitSector, &minertypes.PreCommitSectorParams{
SectorNumber: sectorNumber,
SealedCID: sealedCid,
DealIDs: []abi.DealID{dealID},
}),
// non-zero exit code indicates unsuccessful pre-commit message
receipt: &types.MessageReceipt{ExitCode: 1},
},
},
expectedCBCallCount: 0,
},
"deal already pre-committed": {
currentDealInfo: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: unfinishedDeal,
},
preCommitDiff: &miner.PreCommitChanges{
Added: []minertypes.SectorPreCommitOnChainInfo{{
Info: minertypes.SectorPreCommitInfo{
SectorNumber: sectorNumber,
DealIDs: []abi.DealID{dealID},
},
}},
},
expectedCBCallCount: 1,
expectedCBIsActive: false,
expectedCBSectorNumber: sectorNumber,
},
"error getting current deal info in check func": {
currentDealInfoErr: errors.New("something went wrong"),
expectedCBCallCount: 0,
expectedError: xerrors.Errorf("failed to set up called handler: failed to check deal activity: failed to look up deal on chain: something went wrong"),
},
"sector already active": {
currentDealInfo: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: activeDeal,
},
expectedCBCallCount: 1,
expectedCBIsActive: true,
},
"sector was slashed": {
currentDealInfo: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: slashedDeal,
PublishMsgTipSet: types.EmptyTSK,
},
expectedCBCallCount: 0,
expectedError: xerrors.Errorf("failed to set up called handler: failed to check deal activity: deal %d was slashed at epoch %d", dealID, slashedDeal.State.SlashEpoch),
},
"error getting current deal info in called func": {
currentDealInfo: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: unfinishedDeal,
},
currentDealInfoErr2: errors.New("something went wrong"),
matchStates: []matchState{
{
msg: makeMessage(t, provider, builtin.MethodsMiner.PreCommitSector, &minertypes.PreCommitSectorParams{
SectorNumber: sectorNumber,
SealedCID: sealedCid,
DealIDs: []abi.DealID{dealID},
}),
},
},
expectedCBCallCount: 1,
expectedCBError: errors.New("handling applied event: failed to get dealinfo: something went wrong"),
},
"proposed deal epoch timeout": {
currentDealInfo: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: activeDeal,
},
dealStartEpochTimeout: true,
expectedCBCallCount: 1,
expectedCBError: xerrors.Errorf("handling applied event: deal with piece CID %s was not activated by proposed deal start epoch 0", unfinishedDeal.Proposal.PieceCID),
},
}
runTestCase := func(testCase string, data testCase) {
t.Run(testCase, func(t *testing.T) {
checkTs, err := test.MockTipset(provider, rand.Uint64())
require.NoError(t, err)
matchMessages := make([]matchMessage, len(data.matchStates))
for i, ms := range data.matchStates {
matchTs, err := test.MockTipset(provider, rand.Uint64())
require.NoError(t, err)
matchMessages[i] = matchMessage{
curH: 5,
msg: ms.msg,
msgReceipt: ms.receipt,
ts: matchTs,
}
}
eventsAPI := &fakeEvents{
Ctx: ctx,
CheckTs: checkTs,
MatchMessages: matchMessages,
DealStartEpochTimeout: data.dealStartEpochTimeout,
}
cbCallCount := uint64(0)
var cbSectorNumber abi.SectorNumber
var cbIsActive bool
var cbError error
cb := func(secNum abi.SectorNumber, isActive bool, err error) {
cbCallCount++
cbSectorNumber = secNum
cbIsActive = isActive
cbError = err
}

mockPCAPI := &mockPreCommitsAPI{
PCChanges: data.preCommitDiff,
}
mockDIAPI := &mockDealInfoAPI{
CurrentDealInfo: data.currentDealInfo,
CurrentDealInfo2: data.currentDealInfo,
Err: data.currentDealInfoErr,
Err2: data.currentDealInfoErr2,
}
scm := newSectorCommittedManager(eventsAPI, mockDIAPI, mockPCAPI)
//stm: @MARKET_ADAPTER_ON_SECTOR_PRE_COMMIT_001
err = scm.OnDealSectorPreCommitted(ctx, provider, proposal, publishCid, cb)
if data.expectedError == nil {
require.NoError(t, err)
} else {
require.EqualError(t, err, data.expectedError.Error())
}
require.Equal(t, data.expectedCBSectorNumber, cbSectorNumber)
require.Equal(t, data.expectedCBIsActive, cbIsActive)
require.Equal(t, data.expectedCBCallCount, cbCallCount)
if data.expectedCBError == nil {
require.NoError(t, cbError)
} else {
require.EqualError(t, cbError, data.expectedCBError.Error())
}
})
}
for testCase, data := range testCases {
runTestCase(testCase, data)
}
}

func TestOnDealSectorCommitted(t *testing.T) {
label, err := markettypes.NewLabelFromString("success")
require.NoError(t, err)

provider := address.TestAddress
publishCid := generateCids(1)[0]
pieceCid := generateCids(1)[0]
dealID := abi.DealID(rand.Uint64())
sectorNumber := abi.SectorNumber(rand.Uint64())
proposal := market.DealProposal{
PieceCID: pieceCid,
PieceSize: abi.PaddedPieceSize(rand.Uint64()),
Client: tutils.NewActorAddr(t, "client"),
Provider: tutils.NewActorAddr(t, "provider"),
StoragePricePerEpoch: abi.NewTokenAmount(1),
ProviderCollateral: abi.NewTokenAmount(1),
ClientCollateral: abi.NewTokenAmount(1),
Label: label,
}
unfinishedDeal := &api.MarketDeal{
Proposal: proposal,
State: api.MarketDealState{
SectorStartEpoch: -1,
LastUpdatedEpoch: 2,
},
}
activeDeal := &api.MarketDeal{
Proposal: proposal,
State: api.MarketDealState{
SectorStartEpoch: 1,
LastUpdatedEpoch: 2,
},
}
slashedDeal := &api.MarketDeal{
Proposal: proposal,
State: api.MarketDealState{
SectorStartEpoch: 1,
LastUpdatedEpoch: 2,
SlashEpoch: 2,
},
}
type testCase struct {
currentDealInfo pipeline.CurrentDealInfo
currentDealInfoErr error
currentDealInfo2 pipeline.CurrentDealInfo
currentDealInfoErr2 error
matchStates []matchState
dealStartEpochTimeout bool
expectedCBCallCount uint64
expectedCBError error
expectedError error
}
testCases := map[string]testCase{
"normal sequence": {
currentDealInfo: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: unfinishedDeal,
},
currentDealInfo2: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: activeDeal,
},
matchStates: []matchState{
{
msg: makeMessage(t, provider, builtin.MethodsMiner.ProveCommitSector, &minertypes.ProveCommitSectorParams{
SectorNumber: sectorNumber,
}),
},
},
expectedCBCallCount: 1,
},
"ignores unsuccessful prove-commit message": {
currentDealInfo: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: unfinishedDeal,
},
currentDealInfo2: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: activeDeal,
},
matchStates: []matchState{
{
msg: makeMessage(t, provider, builtin.MethodsMiner.ProveCommitSector, &minertypes.ProveCommitSectorParams{
SectorNumber: sectorNumber,
}),
// Exit-code 1 means the prove-commit was unsuccessful
receipt: &types.MessageReceipt{ExitCode: 1},
},
},
expectedCBCallCount: 0,
},
"error getting current deal info in check func": {
currentDealInfoErr: errors.New("something went wrong"),
expectedCBCallCount: 0,
expectedError: xerrors.Errorf("failed to set up called handler: failed to look up deal on chain: something went wrong"),
},
"sector already active": {
currentDealInfo: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: activeDeal,
},
expectedCBCallCount: 1,
},
"sector was slashed": {
currentDealInfo: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: slashedDeal,
},
expectedCBCallCount: 0,
expectedError: xerrors.Errorf("failed to set up called handler: deal %d was slashed at epoch %d", dealID, slashedDeal.State.SlashEpoch),
},
"error getting current deal info in called func": {
currentDealInfo: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: unfinishedDeal,
},
currentDealInfoErr2: errors.New("something went wrong"),
matchStates: []matchState{
{
msg: makeMessage(t, provider, builtin.MethodsMiner.ProveCommitSector, &minertypes.ProveCommitSectorParams{
SectorNumber: sectorNumber,
}),
},
},
expectedCBCallCount: 1,
expectedCBError: xerrors.Errorf("handling applied event: failed to look up deal on chain: something went wrong"),
},
"proposed deal epoch timeout": {
currentDealInfo: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: unfinishedDeal,
},
dealStartEpochTimeout: true,
expectedCBCallCount: 1,
expectedCBError: xerrors.Errorf("handling applied event: deal with piece CID %s was not activated by proposed deal start epoch 0", unfinishedDeal.Proposal.PieceCID),
},
"got prove-commit but deal not active": {
currentDealInfo: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: unfinishedDeal,
},
currentDealInfo2: pipeline.CurrentDealInfo{
DealID: dealID,
MarketDeal: unfinishedDeal,
},
matchStates: []matchState{
{
msg: makeMessage(t, provider, builtin.MethodsMiner.ProveCommitSector, &minertypes.ProveCommitSectorParams{
SectorNumber: sectorNumber,
}),
},
},
expectedCBCallCount: 1,
expectedCBError: xerrors.Errorf("handling applied event: deal wasn't active: deal=%d, parentState=bafkqaaa, h=5", dealID),
},
}
runTestCase := func(testCase string, data testCase) {
t.Run(testCase, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
checkTs, err := test.MockTipset(provider, rand.Uint64())
require.NoError(t, err)
matchMessages := make([]matchMessage, len(data.matchStates))
for i, ms := range data.matchStates {
matchTs, err := test.MockTipset(provider, rand.Uint64())
require.NoError(t, err)
matchMessages[i] = matchMessage{
curH: 5,
msg: ms.msg,
msgReceipt: ms.receipt,
ts: matchTs,
}
}
eventsAPI := &fakeEvents{
Ctx: ctx,
CheckTs: checkTs,
MatchMessages: matchMessages,
DealStartEpochTimeout: data.dealStartEpochTimeout,
}
cbCallCount := uint64(0)
var cbError error
cb := func(err error) {
cbCallCount++
cbError = err
}
mockPCAPI := &mockPreCommitsAPI{}
mockDIAPI := &mockDealInfoAPI{
CurrentDealInfo: data.currentDealInfo,
CurrentDealInfo2: data.currentDealInfo2,
Err: data.currentDealInfoErr,
Err2: data.currentDealInfoErr2,
}
scm := newSectorCommittedManager(eventsAPI, mockDIAPI, mockPCAPI)
//stm: @MARKET_ADAPTER_ON_SECTOR_COMMIT_001
err = scm.OnDealSectorCommitted(ctx, provider, sectorNumber, proposal, publishCid, cb)
if data.expectedError == nil {
require.NoError(t, err)
} else {
require.EqualError(t, err, data.expectedError.Error())
}
require.Equal(t, data.expectedCBCallCount, cbCallCount)
if data.expectedCBError == nil {
require.NoError(t, cbError)
} else {
require.EqualError(t, cbError, data.expectedCBError.Error())
}
})
}
for testCase, data := range testCases {
runTestCase(testCase, data)
}
}

type matchState struct {
msg *types.Message
receipt *types.MessageReceipt
}

type matchMessage struct {
curH abi.ChainEpoch
msg *types.Message
msgReceipt *types.MessageReceipt
ts *types.TipSet
doesRevert bool
}
type fakeEvents struct {
Ctx context.Context
CheckTs *types.TipSet
MatchMessages []matchMessage
DealStartEpochTimeout bool
}

func (fe *fakeEvents) Called(ctx context.Context, check events.CheckFunc, msgHnd events.MsgHandler, rev events.RevertHandler, confidence int, timeout abi.ChainEpoch, mf events.MsgMatchFunc) error {
if fe.DealStartEpochTimeout {
msgHnd(nil, nil, nil, 100) // nolint:errcheck
return nil
}

_, more, err := check(ctx, fe.CheckTs)
if err != nil {
return err
}
if !more {
return nil
}
for _, matchMessage := range fe.MatchMessages {
matched, err := mf(matchMessage.msg)
if err != nil {
return err
}
if matched {
receipt := matchMessage.msgReceipt
if receipt == nil {
receipt = &types.MessageReceipt{ExitCode: 0}
}
more, err := msgHnd(matchMessage.msg, receipt, matchMessage.ts, matchMessage.curH)
if err != nil {
// error is handled through a callback rather than being returned
return nil
}
if matchMessage.doesRevert {
err := rev(ctx, matchMessage.ts)
if err != nil {
return err
}
}
if !more {
return nil
}
}
}
return nil
}
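
For context, a minimal sketch of how this fake is typically exercised: it runs the check function once, then delivers each matched message to the handler, filling in a default ExitCode-0 receipt, firing the revert handler when doesRevert is set, and stopping once the handler returns more == false. The wiring below is hypothetical; only the fakeEvents type and the events callback signatures come from the code above.

// Hypothetical wiring of fakeEvents in a test.
fe := &fakeEvents{Ctx: ctx, CheckTs: checkTs, MatchMessages: []matchMessage{{curH: 5, msg: msg, ts: ts}}}
err := fe.Called(ctx,
	func(ctx context.Context, ts *types.TipSet) (bool, bool, error) { return false, true, nil }, // keep listening
	func(m *types.Message, rec *types.MessageReceipt, ts *types.TipSet, h abi.ChainEpoch) (bool, error) {
		return false, nil // handled; stop after the first delivery
	},
	func(ctx context.Context, ts *types.TipSet) error { return nil }, // no-op revert
	0, 100,
	func(m *types.Message) (bool, error) { return m.Method == builtin.MethodsMiner.ProveCommitSector, nil },
)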

func makeMessage(t *testing.T, to address.Address, method abi.MethodNum, params cbor.Marshaler) *types.Message {
buf := new(bytes.Buffer)
err := params.MarshalCBOR(buf)
require.NoError(t, err)
return &types.Message{
To: to,
Method: method,
Params: buf.Bytes(),
}
}

var seq int

func generateCids(n int) []cid.Cid {
cids := make([]cid.Cid, 0, n)
for i := 0; i < n; i++ {
c := blocks.NewBlock([]byte(fmt.Sprint(seq))).Cid()
seq++
cids = append(cids, c)
}
return cids
}

type mockPreCommitsAPI struct {
PCChanges *miner.PreCommitChanges
Err error
}

func (m *mockPreCommitsAPI) diffPreCommits(ctx context.Context, actor address.Address, pre, cur types.TipSetKey) (*miner.PreCommitChanges, error) {
pcc := &miner.PreCommitChanges{}
if m.PCChanges != nil {
pcc = m.PCChanges
}
return pcc, m.Err
}

type mockDealInfoAPI struct {
count int
CurrentDealInfo pipeline.CurrentDealInfo
Err error
CurrentDealInfo2 pipeline.CurrentDealInfo
Err2 error
}

func (m *mockDealInfoAPI) GetCurrentDealInfo(ctx context.Context, tsk types.TipSetKey, proposal *market.DealProposal, publishCid cid.Cid) (pipeline.CurrentDealInfo, error) {
m.count++
if m.count == 2 {
return m.CurrentDealInfo2, m.Err2
}
return m.CurrentDealInfo, m.Err
}
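
The mock switches to its second result on the second lookup, which lets a test present different chain state before and after the commit message lands. A minimal sketch (values hypothetical):

di := &mockDealInfoAPI{
	CurrentDealInfo:  pipeline.CurrentDealInfo{MarketDeal: unfinishedDeal},
	CurrentDealInfo2: pipeline.CurrentDealInfo{MarketDeal: activeDeal},
}
first, _ := di.GetCurrentDealInfo(ctx, tsk, &proposal, publishCid)  // deal not yet active
second, _ := di.GetCurrentDealInfo(ctx, tsk, &proposal, publishCid) // deal active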

@ -1,441 +0,0 @@
package storageadapter

// this file implements storagemarket.StorageProviderNode

import (
"context"
"time"

"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"go.uber.org/fx"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/shared"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/exitcode"

"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/events"
"github.com/filecoin-project/lotus/chain/events/state"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/sigs"
"github.com/filecoin-project/lotus/markets/utils"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/modules/helpers"
pipeline "github.com/filecoin-project/lotus/storage/pipeline"
"github.com/filecoin-project/lotus/storage/pipeline/piece"
"github.com/filecoin-project/lotus/storage/sectorblocks"
)

var addPieceRetryWait = 5 * time.Minute
var addPieceRetryTimeout = 6 * time.Hour
var defaultMaxProviderCollateralMultiplier = uint64(2)
var log = logging.Logger("storageadapter")

type ProviderNodeAdapter struct {
v1api.FullNode

secb *sectorblocks.SectorBlocks
ev *events.Events

dealPublisher *DealPublisher

addBalanceSpec *api.MessageSendSpec
maxDealCollateralMultiplier uint64
dsMatcher *dealStateMatcher
scMgr *SectorCommittedManager
}

func NewProviderNodeAdapter(fc *config.MinerFeeConfig, dc *config.DealmakingConfig) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, secb *sectorblocks.SectorBlocks, full v1api.FullNode, dealPublisher *DealPublisher) (storagemarket.StorageProviderNode, error) {
return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, secb *sectorblocks.SectorBlocks, full v1api.FullNode, dealPublisher *DealPublisher) (storagemarket.StorageProviderNode, error) {
ctx := helpers.LifecycleCtx(mctx, lc)

ev, err := events.NewEvents(ctx, full)
if err != nil {
return nil, err
}
na := &ProviderNodeAdapter{
FullNode: full,

secb: secb,
ev: ev,
dealPublisher: dealPublisher,
dsMatcher: newDealStateMatcher(state.NewStatePredicates(state.WrapFastAPI(full))),
}
if fc != nil {
na.addBalanceSpec = &api.MessageSendSpec{MaxFee: abi.TokenAmount(fc.MaxMarketBalanceAddFee)}
}
na.maxDealCollateralMultiplier = defaultMaxProviderCollateralMultiplier
if dc != nil {
na.maxDealCollateralMultiplier = dc.MaxProviderCollateralMultiplier
}
na.scMgr = NewSectorCommittedManager(ev, na, &apiWrapper{api: full})

return na, nil
}
}

func (n *ProviderNodeAdapter) PublishDeals(ctx context.Context, deal storagemarket.MinerDeal) (cid.Cid, error) {
return n.dealPublisher.Publish(ctx, deal.ClientDealProposal)
}

func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceData shared.ReadSeekStarter) (*storagemarket.PackingResult, error) {
if deal.PublishCid == nil {
return nil, xerrors.Errorf("deal.PublishCid can't be nil")
}

sdInfo := piece.PieceDealInfo{
DealID: deal.DealID,
DealProposal: &deal.Proposal,
PublishCid: deal.PublishCid,
DealSchedule: piece.DealSchedule{
StartEpoch: deal.ClientDealProposal.Proposal.StartEpoch,
EndEpoch: deal.ClientDealProposal.Proposal.EndEpoch,
},
KeepUnsealed: deal.FastRetrieval,
}

// Attempt to add the piece to the sector
p, offset, err := n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo)
curTime := build.Clock.Now()
for build.Clock.Since(curTime) < addPieceRetryTimeout {
// Check if there was an error because of too many sectors being sealed
if !xerrors.Is(err, pipeline.ErrTooManySectorsSealing) {
if err != nil {
log.Errorf("failed to addPiece for deal %d, err: %v", deal.DealID, err)
}

// There was either a fatal error or no error. In either case
// don't retry AddPiece
break
}

// The piece could not be added to the sector because there are too
// many sectors being sealed; back off for a while before trying again
select {
case <-build.Clock.After(addPieceRetryWait):
// Reset the reader to the start
err = pieceData.SeekStart()
if err != nil {
return nil, xerrors.Errorf("failed to reset piece reader to start before retrying AddPiece for deal %d: %w", deal.DealID, err)
}

// Attempt to add the piece again
p, offset, err = n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo)
case <-ctx.Done():
return nil, xerrors.New("context expired while waiting to retry AddPiece")
}
}

if err != nil {
return nil, xerrors.Errorf("AddPiece failed: %s", err)
}
log.Warnf("New Deal: deal %d", deal.DealID)

return &storagemarket.PackingResult{
SectorNumber: p,
Offset: offset,
Size: pieceSize.Padded(),
}, nil
}
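
Worth noting in the retry loop above: pieceData is a shared.ReadSeekStarter, an io.Reader that can rewind to offset zero, and the rewind before each retry matters because a failed AddPiece may already have consumed part of the stream. A minimal sketch of satisfying that contract with a file; the adapter type is hypothetical, not part of this codebase:

// fileSeekStarter (hypothetical) adapts an *os.File to the
// rewind-then-retry contract: SeekStart repositions the reader
// at byte 0 so AddPiece can re-read the full piece.
type fileSeekStarter struct{ *os.File }

func (f fileSeekStarter) SeekStart() error {
	_, err := f.Seek(0, io.SeekStart)
	return err
}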

func (n *ProviderNodeAdapter) VerifySignature(ctx context.Context, sig crypto.Signature, addr address.Address, input []byte, encodedTs shared.TipSetToken) (bool, error) {
addr, err := n.StateAccountKey(ctx, addr, types.EmptyTSK)
if err != nil {
return false, err
}

err = sigs.Verify(&sig, addr, input)
return err == nil, err
}

func (n *ProviderNodeAdapter) GetMinerWorkerAddress(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (address.Address, error) {
tsk, err := types.TipSetKeyFromBytes(tok)
if err != nil {
return address.Undef, err
}

mi, err := n.StateMinerInfo(ctx, maddr, tsk)
if err != nil {
return address.Address{}, err
}
return mi.Worker, nil
}

func (n *ProviderNodeAdapter) GetProofType(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (abi.RegisteredSealProof, error) {
tsk, err := types.TipSetKeyFromBytes(tok)
if err != nil {
return 0, err
}

mi, err := n.StateMinerInfo(ctx, maddr, tsk)
if err != nil {
return 0, err
}

nver, err := n.StateNetworkVersion(ctx, tsk)
if err != nil {
return 0, err
}

// false because this variance is not consumed.
const configWantSynthetic = false

return miner.PreferredSealProofTypeFromWindowPoStType(nver, mi.WindowPoStProofType, configWantSynthetic)
}

func (n *ProviderNodeAdapter) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) {
signer, err := n.StateAccountKey(ctx, signer, types.EmptyTSK)
if err != nil {
return nil, err
}

localSignature, err := n.WalletSign(ctx, signer, b)
if err != nil {
return nil, err
}
return localSignature, nil
}

func (n *ProviderNodeAdapter) ReserveFunds(ctx context.Context, wallet, addr address.Address, amt abi.TokenAmount) (cid.Cid, error) {
return n.MarketReserveFunds(ctx, wallet, addr, amt)
}

func (n *ProviderNodeAdapter) ReleaseFunds(ctx context.Context, addr address.Address, amt abi.TokenAmount) error {
return n.MarketReleaseFunds(ctx, addr, amt)
}

// Adds funds with the StorageMinerActor for a storage participant. Used by both providers and clients.
func (n *ProviderNodeAdapter) AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) {
// (Provider Node API)
smsg, err := n.MpoolPushMessage(ctx, &types.Message{
To: market.Address,
From: addr,
Value: amount,
Method: market.Methods.AddBalance,
}, n.addBalanceSpec)
if err != nil {
return cid.Undef, err
}

return smsg.Cid(), nil
}

func (n *ProviderNodeAdapter) GetBalance(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (storagemarket.Balance, error) {
tsk, err := types.TipSetKeyFromBytes(encodedTs)
if err != nil {
return storagemarket.Balance{}, err
}

bal, err := n.StateMarketBalance(ctx, addr, tsk)
if err != nil {
return storagemarket.Balance{}, err
}

return utils.ToSharedBalance(bal), nil
}

// TODO: why doesn't this method take in a sector ID?
func (n *ProviderNodeAdapter) LocatePieceForDealWithinSector(ctx context.Context, dealID abi.DealID, encodedTs shared.TipSetToken) (sectorID abi.SectorNumber, offset abi.PaddedPieceSize, length abi.PaddedPieceSize, err error) {
refs, err := n.secb.GetRefs(ctx, dealID)
if err != nil {
return 0, 0, 0, err
}
if len(refs) == 0 {
return 0, 0, 0, xerrors.New("no sector information for deal ID")
}

// TODO: better strategy (e.g. look for already unsealed)
var best api.SealedRef
var bestSi api.SectorInfo
for _, r := range refs {
si, err := n.secb.SectorBuilder.SectorsStatus(ctx, r.SectorID, false)
if err != nil {
return 0, 0, 0, xerrors.Errorf("getting sector info: %w", err)
}
if si.State == api.SectorState(pipeline.Proving) {
best = r
bestSi = si
break
}
}
if bestSi.State == api.SectorState(pipeline.UndefinedSectorState) {
return 0, 0, 0, xerrors.New("no sealed sector found")
}
return best.SectorID, best.Offset, best.Size.Padded(), nil
}

func (n *ProviderNodeAdapter) DealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, isVerified bool) (abi.TokenAmount, abi.TokenAmount, error) {
bounds, err := n.StateDealProviderCollateralBounds(ctx, size, isVerified, types.EmptyTSK)
if err != nil {
return abi.TokenAmount{}, abi.TokenAmount{}, err
}

// The maximum amount of collateral that the provider will put into escrow
// for a deal is calculated as a multiple of the minimum bounded amount
max := types.BigMul(bounds.Min, types.NewInt(n.maxDealCollateralMultiplier))

return bounds.Min, max, nil
}
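
With the default multiplier of 2 this caps the advertised maximum at twice the chain-reported minimum. A sketch of the same arithmetic (amounts hypothetical):

min := abi.NewTokenAmount(1_000_000)      // chain-reported minimum, hypothetical
max := types.BigMul(min, types.NewInt(2)) // default maxDealCollateralMultiplier
// the adapter reports bounds [min, max] == [1_000_000, 2_000_000] attoFIL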

// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer)
func (n *ProviderNodeAdapter) OnDealSectorPreCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, proposal markettypes.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorPreCommittedCallback) error {
return n.scMgr.OnDealSectorPreCommitted(ctx, provider, proposal, *publishCid, cb)
}

// TODO: Remove dealID parameter, change publishCid to be cid.Cid (instead of pointer)
func (n *ProviderNodeAdapter) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, sectorNumber abi.SectorNumber, proposal markettypes.DealProposal, publishCid *cid.Cid, cb storagemarket.DealSectorCommittedCallback) error {
return n.scMgr.OnDealSectorCommitted(ctx, provider, sectorNumber, proposal, *publishCid, cb)
}

func (n *ProviderNodeAdapter) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) {
head, err := n.ChainHead(ctx)
if err != nil {
return nil, 0, err
}

return head.Key().Bytes(), head.Height(), nil
}

func (n *ProviderNodeAdapter) WaitForMessage(ctx context.Context, mcid cid.Cid, cb func(code exitcode.ExitCode, bytes []byte, finalCid cid.Cid, err error) error) error {
receipt, err := n.StateWaitMsg(ctx, mcid, 2*build.MessageConfidence, api.LookbackNoLimit, true)
if err != nil {
return cb(0, nil, cid.Undef, err)
}
return cb(receipt.Receipt.ExitCode, receipt.Receipt.Return, receipt.Message, nil)
}

func (n *ProviderNodeAdapter) WaitForPublishDeals(ctx context.Context, publishCid cid.Cid, proposal markettypes.DealProposal) (*storagemarket.PublishDealsWaitResult, error) {
// Wait for deal to be published (plus additional time for confidence)
receipt, err := n.StateWaitMsg(ctx, publishCid, 2*build.MessageConfidence, api.LookbackNoLimit, true)
if err != nil {
return nil, xerrors.Errorf("WaitForPublishDeals errored: %w", err)
}
if receipt.Receipt.ExitCode != exitcode.Ok {
return nil, xerrors.Errorf("WaitForPublishDeals exit code: %s", receipt.Receipt.ExitCode)
}

// The deal ID may have changed since publish if there was a reorg, so
// get the current deal ID
head, err := n.ChainHead(ctx)
if err != nil {
return nil, xerrors.Errorf("WaitForPublishDeals failed to get chain head: %w", err)
}

res, err := n.scMgr.dealInfo.GetCurrentDealInfo(ctx, head.Key(), &proposal, publishCid)
if err != nil {
return nil, xerrors.Errorf("WaitForPublishDeals getting deal info errored: %w", err)
}

return &storagemarket.PublishDealsWaitResult{DealID: res.DealID, FinalCid: receipt.Message}, nil
}

func (n *ProviderNodeAdapter) GetDataCap(ctx context.Context, addr address.Address, encodedTs shared.TipSetToken) (*abi.StoragePower, error) {
tsk, err := types.TipSetKeyFromBytes(encodedTs)
if err != nil {
return nil, err
}

sp, err := n.StateVerifiedClientStatus(ctx, addr, tsk)
return sp, err
}

func (n *ProviderNodeAdapter) OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired storagemarket.DealExpiredCallback, onDealSlashed storagemarket.DealSlashedCallback) error {
head, err := n.ChainHead(ctx)
if err != nil {
return xerrors.Errorf("client: failed to get chain head: %w", err)
}

sd, err := n.StateMarketStorageDeal(ctx, dealID, head.Key())
if err != nil {
return xerrors.Errorf("client: failed to look up deal %d on chain: %w", dealID, err)
}

// Called immediately to check if the deal has already expired or been slashed
checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) {
if ts == nil {
// keep listening for events
return false, true, nil
}

// Check if the deal has already expired
if sd.Proposal.EndEpoch <= ts.Height() {
onDealExpired(nil)
return true, false, nil
}

// If there is no deal assume it's already been slashed
if sd.State.SectorStartEpoch < 0 {
onDealSlashed(ts.Height(), nil)
return true, false, nil
}

// No events have occurred yet, so return
// done: false, more: true (keep listening for events)
return false, true, nil
}

// Called when there was a match against the state change we're looking for
// and the chain has advanced to the confidence height
stateChanged := func(ts *types.TipSet, ts2 *types.TipSet, states events.StateChange, h abi.ChainEpoch) (more bool, err error) {
// Check if the deal has already expired
if ts2 == nil || sd.Proposal.EndEpoch <= ts2.Height() {
onDealExpired(nil)
return false, nil
}

// Timeout waiting for state change
if states == nil {
log.Error("timed out waiting for deal expiry")
return false, nil
}

changedDeals, ok := states.(state.ChangedDeals)
if !ok {
panic("Expected state.ChangedDeals")
}

deal, ok := changedDeals[dealID]
if !ok {
// No change to deal
return true, nil
}

// Deal was slashed
if deal.To == nil {
onDealSlashed(ts2.Height(), nil)
return false, nil
}

return true, nil
}

// Called when there was a chain reorg and the state change was reverted
revert := func(ctx context.Context, ts *types.TipSet) error {
// TODO: Is it ok to just ignore this?
log.Warn("deal state reverted; TODO: actually handle this!")
return nil
}

// Watch for state changes to the deal
match := n.dsMatcher.matcher(ctx, dealID)

// Wait until after the end epoch for the deal and then timeout
timeout := (sd.Proposal.EndEpoch - head.Height()) + 1
if err := n.ev.StateChanged(checkFunc, stateChanged, revert, int(build.MessageConfidence)+1, timeout, match); err != nil {
return xerrors.Errorf("failed to set up state changed handler: %w", err)
}

return nil
}

var _ storagemarket.StorageProviderNode = &ProviderNodeAdapter{}
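
The trailing var _ line is the standard Go compile-time assertion that *ProviderNodeAdapter implements storagemarket.StorageProviderNode; the build fails if a method goes missing. The same idiom in isolation, with toy types for illustration only:

type Stringer interface{ String() string }

type id int

func (i id) String() string { return fmt.Sprint(int(i)) }

var _ Stringer = id(0) // compile-time proof that id satisfies Stringer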

@ -1,39 +0,0 @@
package utils

import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"

"github.com/filecoin-project/lotus/api"
)

func NewStorageProviderInfo(address address.Address, miner address.Address, sectorSize abi.SectorSize, peer peer.ID, addrs []abi.Multiaddrs) storagemarket.StorageProviderInfo {
multiaddrs := make([]multiaddr.Multiaddr, 0, len(addrs))
for _, a := range addrs {
maddr, err := multiaddr.NewMultiaddrBytes(a)
if err != nil {
return storagemarket.StorageProviderInfo{}
}
multiaddrs = append(multiaddrs, maddr)
}

return storagemarket.StorageProviderInfo{
Address: address,
Worker: miner,
SectorSize: uint64(sectorSize),
PeerID: peer,
Addrs: multiaddrs,
}
}

func ToSharedBalance(bal api.MarketBalance) storagemarket.Balance {
return storagemarket.Balance{
Locked: bal.Locked,
Available: big.Sub(bal.Escrow, bal.Locked),
}
}
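
Available here is simply escrow minus locked, so a participant with 10 units escrowed and 3 locked reports 7 available. A sketch with hypothetical amounts:

bal := api.MarketBalance{Escrow: big.NewInt(10), Locked: big.NewInt(3)}
shared := ToSharedBalance(bal)
// shared.Locked == 3, shared.Available == 7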

@ -1,98 +0,0 @@
package utils

import (
"bytes"
"context"
"fmt"
"io"

// must be imported to init() raw-codec support
_ "github.com/ipld/go-ipld-prime/codec/raw"

"github.com/ipfs/go-cid"
mdagipld "github.com/ipfs/go-ipld-format"
"github.com/ipfs/go-unixfsnode"
dagpb "github.com/ipld/go-codec-dagpb"
"github.com/ipld/go-ipld-prime"
cidlink "github.com/ipld/go-ipld-prime/linking/cid"
basicnode "github.com/ipld/go-ipld-prime/node/basic"
"github.com/ipld/go-ipld-prime/traversal"
"github.com/ipld/go-ipld-prime/traversal/selector"
selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse"
)

func TraverseDag(
ctx context.Context,
ds mdagipld.DAGService,
startFrom cid.Cid,
optionalSelector ipld.Node,
onOpen func(node mdagipld.Node) error,
visitCallback traversal.AdvVisitFn,
) error {

if optionalSelector == nil {
optionalSelector = selectorparse.CommonSelector_MatchAllRecursively
}

parsedSelector, err := selector.ParseSelector(optionalSelector)
if err != nil {
return err
}

// not sure what this is for TBH: we also provide ctx in &traversal.Config{}
linkContext := ipld.LinkContext{Ctx: ctx}

// this is what allows us to understand dagpb
nodePrototypeChooser := dagpb.AddSupportToChooser(
func(ipld.Link, ipld.LinkContext) (ipld.NodePrototype, error) {
return basicnode.Prototype.Any, nil
},
)

// this is how we implement GETs
linkSystem := cidlink.DefaultLinkSystem()
linkSystem.StorageReadOpener = func(lctx ipld.LinkContext, lnk ipld.Link) (io.Reader, error) {
cl, isCid := lnk.(cidlink.Link)
if !isCid {
return nil, fmt.Errorf("unexpected link type %#v", lnk)
}

node, err := ds.Get(lctx.Ctx, cl.Cid)
if err != nil {
return nil, err
}

if onOpen != nil {
if err := onOpen(node); err != nil {
return nil, err
}
}

return bytes.NewBuffer(node.RawData()), nil
}
unixfsnode.AddUnixFSReificationToLinkSystem(&linkSystem)

// this is how we pull the start node out of the DS
startLink := cidlink.Link{Cid: startFrom}
startNodePrototype, err := nodePrototypeChooser(startLink, linkContext)
if err != nil {
return err
}
startNode, err := linkSystem.Load(
linkContext,
startLink,
startNodePrototype,
)
if err != nil {
return err
}

// this is the actual execution, invoking the supplied callback
return traversal.Progress{
Cfg: &traversal.Config{
Ctx: ctx,
LinkSystem: linkSystem,
LinkTargetNodePrototypeChooser: nodePrototypeChooser,
},
}.WalkAdv(startNode, parsedSelector, visitCallback)
}
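
A minimal, hypothetical invocation of the helper above: walk everything reachable from a root CID with the default match-all selector and print each visited path. dagService and rootCid are assumed to be an mdagipld.DAGService and a cid.Cid already in scope.

err := TraverseDag(ctx, dagService, rootCid, nil, nil,
	func(p traversal.Progress, n ipld.Node, _ traversal.VisitReason) error {
		fmt.Println(p.Path.String()) // path of the node being visited
		return nil
	},
)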

@ -33,7 +33,6 @@ import (
_ "github.com/filecoin-project/lotus/lib/sigs/bls"
_ "github.com/filecoin-project/lotus/lib/sigs/delegated"
_ "github.com/filecoin-project/lotus/lib/sigs/secp"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/impl/common"
"github.com/filecoin-project/lotus/node/impl/net"
@ -69,9 +68,7 @@ var (
AutoNATSvcKey = special{10} // Libp2p option
BandwidthReporterKey = special{11} // Libp2p option
ConnGaterKey = special{12} // Libp2p option
DAGStoreKey = special{13} // constructor returns multiple values
ResourceManagerKey = special{14} // Libp2p option
UserAgentKey = special{15} // Libp2p option
)

type invoke int
@ -91,7 +88,6 @@ const (
CheckFDLimit
CheckFvmConcurrency
CheckUDPBufferSize
LegacyMarketsEOL

// libp2p
PstoreAddSelfKeysKey
@ -108,7 +104,6 @@ const (

HandleIncomingBlocksKey
HandleIncomingMessagesKey
HandleMigrateClientFundsKey
HandlePaymentChannelManagerKey

RelayIndexerMessagesKey
@ -397,7 +392,6 @@ func Test() Option {
Unset(RunPeerMgrKey),
Unset(new(*peermgr.PeerMgr)),
Override(new(beacon.Schedule), testing.RandomBeacon),
Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{})),
Override(new(index.MsgIndex), modules.DummyMsgIndex),
)
}

@ -6,11 +6,6 @@ import (
"go.uber.org/fx"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-fil-markets/discovery"
discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"

"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
@ -33,8 +28,6 @@ import (
ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger"
"github.com/filecoin-project/lotus/chain/wallet/remotewallet"
"github.com/filecoin-project/lotus/lib/peermgr"
"github.com/filecoin-project/lotus/markets/retrievaladapter"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/hello"
"github.com/filecoin-project/lotus/node/impl"
@ -105,9 +98,6 @@ var ChainNode = Options(
Override(new(*messagepool.MessagePool), modules.MessagePool),
Override(new(*dtypes.MpoolLocker), new(dtypes.MpoolLocker)),

// Shared graphsync (markets, serving chain)
Override(new(dtypes.Graphsync), modules.Graphsync(config.DefaultFullNode().Client.SimultaneousTransfersForStorage, config.DefaultFullNode().Client.SimultaneousTransfersForRetrieval)),

// Service: Wallet
Override(new(*messagesigner.MessageSigner), messagesigner.NewMessageSigner),
Override(new(messagesigner.MsgSigner), func(ms *messagesigner.MessageSigner) *messagesigner.MessageSigner { return ms }),
@ -122,23 +112,8 @@ var ChainNode = Options(
Override(HandlePaymentChannelManagerKey, modules.HandlePaychManager),
Override(SettlePaymentChannelsKey, settler.SettlePaymentChannels),

// Markets (common)
Override(new(*discoveryimpl.Local), modules.NewLocalDiscovery),

// Markets (retrieval)
Override(new(discovery.PeerResolver), modules.RetrievalResolver),
Override(new(retrievalmarket.BlockstoreAccessor), modules.RetrievalBlockstoreAccessor),
Override(new(retrievalmarket.RetrievalClient), modules.RetrievalClient(false)),
Override(new(dtypes.ClientDataTransfer), modules.NewClientGraphsyncDataTransfer),

// Markets (storage)
Override(new(*market.FundManager), market.NewFundManager),
Override(new(dtypes.ClientDatastore), modules.NewClientDatastore),
Override(new(storagemarket.BlockstoreAccessor), modules.StorageBlockstoreAccessor),
Override(new(*retrievaladapter.APIBlockstoreAccessor), retrievaladapter.NewAPIBlockstoreAdapter),
Override(new(storagemarket.StorageClient), modules.StorageClient),
Override(new(storagemarket.StorageClientNode), storageadapter.NewClientNodeAdapter),
Override(HandleMigrateClientFundsKey, modules.HandleMigrateClientFunds),

Override(new(*full.GasPriceCache), full.NewGasPriceCache),

@ -225,14 +200,6 @@ func ConfigFullNode(c interface{}) Option {
// as it enables us to serve logs in eth_getTransactionReceipt.
If(cfg.Fevm.EnableEthRPC || cfg.Events.EnableActorEventsAPI, Override(StoreEventsKey, modules.EnableStoringEvents)),

Override(new(dtypes.ClientImportMgr), modules.ClientImportMgr),

Override(new(dtypes.ClientBlockstore), modules.ClientBlockstore),

Override(new(dtypes.Graphsync), modules.Graphsync(cfg.Client.SimultaneousTransfersForStorage, cfg.Client.SimultaneousTransfersForRetrieval)),

Override(new(retrievalmarket.RetrievalClient), modules.RetrievalClient(cfg.Client.OffChainRetrieval)),

If(cfg.Wallet.RemoteBackend != "",
Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)),
),

@ -2,16 +2,10 @@ package node

import (
"errors"
"time"

provider "github.com/ipni/index-provider"
"go.uber.org/fx"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-fil-markets/retrievalmarket"
rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask"
"github.com/filecoin-project/go-state-types/abi"

"github.com/filecoin-project/lotus/api"
@ -20,12 +14,6 @@ import (
"github.com/filecoin-project/lotus/chain/gen"
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/markets/dagstore"
"github.com/filecoin-project/lotus/markets/dealfilter"
"github.com/filecoin-project/lotus/markets/idxprov"
"github.com/filecoin-project/lotus/markets/retrievaladapter"
"github.com/filecoin-project/lotus/markets/sectoraccessor"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/impl"
@ -62,21 +50,6 @@ func ConfigStorageMiner(c interface{}) Option {
return Error(xerrors.Errorf("invalid config from repo, got: %T", c))
}

pricingConfig := cfg.Dealmaking.RetrievalPricing
if pricingConfig.Strategy == config.RetrievalPricingExternalMode {
if pricingConfig.External == nil {
return Error(xerrors.New("retrieval pricing policy has been set to external but external policy config is nil"))
}

if pricingConfig.External.Path == "" {
return Error(xerrors.New("retrieval pricing policy has been set to external but external script path is empty"))
}
} else if pricingConfig.Strategy != config.RetrievalPricingDefaultMode {
return Error(xerrors.New("retrieval pricing policy must be either default or external"))
}

enableLibp2pNode := cfg.Subsystems.EnableMarkets // we enable libp2p nodes if the storage market subsystem is enabled, otherwise we don't

return Options(

Override(new(v1api.FullNode), modules.MakeUuidWrapper),
@ -84,7 +57,7 @@ func ConfigStorageMiner(c interface{}) Option {
Override(new(dtypes.DrandSchedule), modules.BuiltinDrandConfig),
Override(new(dtypes.BootstrapPeers), modules.BuiltinBootstrap),
Override(new(dtypes.DrandBootstrap), modules.DrandBootstrap),
ConfigCommon(&cfg.Common, build.NodeUserVersion(), enableLibp2pNode),
ConfigCommon(&cfg.Common, build.NodeUserVersion(), false),

Override(CheckFDLimit, modules.CheckFdLimit(build.MinerFDLimit)), // recommend at least 100k FD limit to miners

@ -93,7 +66,6 @@ func ConfigStorageMiner(c interface{}) Option {
Override(new(*paths.Local), modules.LocalStorage),
Override(new(*paths.Remote), modules.RemoteStorage),
Override(new(paths.Store), From(new(*paths.Remote))),
Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(cfg.Dealmaking)),

If(cfg.Subsystems.EnableMining || cfg.Subsystems.EnableSealing,
Override(GetParamsKey, modules.GetParams(!cfg.Proving.DisableBuiltinWindowPoSt || !cfg.Proving.DisableBuiltinWinningPoSt || cfg.Storage.AllowCommit || cfg.Storage.AllowProveReplicaUpdate2)),
@ -164,88 +136,6 @@ func ConfigStorageMiner(c interface{}) Option {
Override(new(paths.SectorIndex), From(new(modules.MinerSealingService))),
),

If(cfg.Subsystems.EnableMarkets,

// Alert that legacy-markets is being deprecated
Override(LegacyMarketsEOL, modules.LegacyMarketsEOL),

// Markets
Override(new(dtypes.StagingBlockstore), modules.StagingBlockstore),
Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(cfg.Dealmaking.SimultaneousTransfersForStorage, cfg.Dealmaking.SimultaneousTransfersForStoragePerClient, cfg.Dealmaking.SimultaneousTransfersForRetrieval)),
Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore),
Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks),

// Markets (retrieval deps)
Override(new(sectorstorage.PieceProvider), sectorstorage.NewPieceProvider),
Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(config.DealmakingConfig{
RetrievalPricing: &config.RetrievalPricing{
Strategy: config.RetrievalPricingDefaultMode,
Default: &config.RetrievalPricingDefault{},
},
})),
Override(new(dtypes.RetrievalPricingFunc), modules.RetrievalPricingFunc(cfg.Dealmaking)),

// DAG Store
Override(new(dagstore.MinerAPI), modules.NewMinerAPI(cfg.DAGStore)),
Override(DAGStoreKey, modules.DAGStore(cfg.DAGStore)),

// Markets (retrieval)
Override(new(dagstore.SectorAccessor), sectoraccessor.NewSectorAccessor),
Override(new(retrievalmarket.SectorAccessor), From(new(dagstore.SectorAccessor))),
Override(new(retrievalmarket.RetrievalProviderNode), retrievaladapter.NewRetrievalProviderNode),
Override(new(rmnet.RetrievalMarketNetwork), modules.RetrievalNetwork),
Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider),
Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(nil)),
Override(HandleRetrievalKey, modules.HandleRetrieval),

// Markets (storage)
Override(new(dtypes.ProviderTransferNetwork), modules.NewProviderTransferNetwork),
Override(new(dtypes.ProviderTransport), modules.NewProviderTransport),
Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDataTransfer),
Override(new(idxprov.MeshCreator), idxprov.NewMeshCreator),
Override(new(provider.Interface), modules.IndexProvider(cfg.IndexProvider)),
Override(new(*storedask.StoredAsk), modules.NewStorageAsk),
Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(cfg.Dealmaking, nil)),
Override(new(storagemarket.StorageProvider), modules.StorageProvider),
Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{})),
Override(HandleMigrateProviderFundsKey, modules.HandleMigrateProviderFunds),
Override(HandleDealsKey, modules.HandleDeals),

// Config (todo: get a real property system)
Override(new(dtypes.ConsiderOnlineStorageDealsConfigFunc), modules.NewConsiderOnlineStorageDealsConfigFunc),
Override(new(dtypes.SetConsiderOnlineStorageDealsConfigFunc), modules.NewSetConsideringOnlineStorageDealsFunc),
Override(new(dtypes.ConsiderOnlineRetrievalDealsConfigFunc), modules.NewConsiderOnlineRetrievalDealsConfigFunc),
Override(new(dtypes.SetConsiderOnlineRetrievalDealsConfigFunc), modules.NewSetConsiderOnlineRetrievalDealsConfigFunc),
Override(new(dtypes.StorageDealPieceCidBlocklistConfigFunc), modules.NewStorageDealPieceCidBlocklistConfigFunc),
Override(new(dtypes.SetStorageDealPieceCidBlocklistConfigFunc), modules.NewSetStorageDealPieceCidBlocklistConfigFunc),
Override(new(dtypes.ConsiderOfflineStorageDealsConfigFunc), modules.NewConsiderOfflineStorageDealsConfigFunc),
Override(new(dtypes.SetConsiderOfflineStorageDealsConfigFunc), modules.NewSetConsideringOfflineStorageDealsFunc),
Override(new(dtypes.ConsiderOfflineRetrievalDealsConfigFunc), modules.NewConsiderOfflineRetrievalDealsConfigFunc),
Override(new(dtypes.SetConsiderOfflineRetrievalDealsConfigFunc), modules.NewSetConsiderOfflineRetrievalDealsConfigFunc),
Override(new(dtypes.ConsiderVerifiedStorageDealsConfigFunc), modules.NewConsiderVerifiedStorageDealsConfigFunc),
Override(new(dtypes.SetConsiderVerifiedStorageDealsConfigFunc), modules.NewSetConsideringVerifiedStorageDealsFunc),
Override(new(dtypes.ConsiderUnverifiedStorageDealsConfigFunc), modules.NewConsiderUnverifiedStorageDealsConfigFunc),
Override(new(dtypes.SetConsiderUnverifiedStorageDealsConfigFunc), modules.NewSetConsideringUnverifiedStorageDealsFunc),
Override(new(dtypes.SetExpectedSealDurationFunc), modules.NewSetExpectedSealDurationFunc),
Override(new(dtypes.GetExpectedSealDurationFunc), modules.NewGetExpectedSealDurationFunc),
Override(new(dtypes.SetMaxDealStartDelayFunc), modules.NewSetMaxDealStartDelayFunc),
Override(new(dtypes.GetMaxDealStartDelayFunc), modules.NewGetMaxDealStartDelayFunc),

If(cfg.Dealmaking.Filter != "",
Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(cfg.Dealmaking, dealfilter.CliStorageDealFilter(cfg.Dealmaking.Filter))),
),

If(cfg.Dealmaking.RetrievalFilter != "",
Override(new(dtypes.RetrievalDealFilter), modules.RetrievalDealFilter(dealfilter.CliRetrievalDealFilter(cfg.Dealmaking.RetrievalFilter))),
),
Override(new(*storageadapter.DealPublisher), storageadapter.NewDealPublisher(&cfg.Fees, storageadapter.PublishMsgConfig{
Period: time.Duration(cfg.Dealmaking.PublishMsgPeriod),
MaxDealsPerMsg: cfg.Dealmaking.MaxDealsPerPublishMsg,
StartEpochSealingBuffer: cfg.Dealmaking.StartEpochSealingBuffer,
})),
Override(new(storagemarket.StorageProviderNode), storageadapter.NewProviderNodeAdapter(&cfg.Fees, &cfg.Dealmaking)),
),

Override(new(config.SealerConfig), cfg.Storage),
Override(new(config.ProvingConfig), cfg.Proving),
Override(new(config.HarmonyDB), cfg.HarmonyDB),
@ -254,7 +144,7 @@ func ConfigStorageMiner(c interface{}) Option {
)
}

func StorageMiner(out *api.StorageMiner, subsystemsCfg config.MinerSubsystemConfig) Option {
func StorageMiner(out *api.StorageMiner) Option {
return Options(
ApplyIf(func(s *Settings) bool { return s.Config },
Error(errors.New("the StorageMiner option must be set before Config option")),
@ -262,7 +152,6 @@ func StorageMiner(out *api.StorageMiner, subsystemsCfg config.MinerSubsystemConf

func(s *Settings) error {
s.nodeType = repo.StorageMiner
s.enableLibp2pNode = subsystemsCfg.EnableMarkets
return nil
},

@ -2,12 +2,8 @@ package config

import (
"encoding"
"os"
"strconv"
"time"

"github.com/ipfs/go-cid"

"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
@ -18,24 +14,6 @@ import (
"github.com/filecoin-project/lotus/chain/types"
)

const (
// RetrievalPricingDefault configures the node to use the default retrieval pricing policy.
RetrievalPricingDefaultMode = "default"
// RetrievalPricingExternal configures the node to use the external retrieval pricing script
// configured by the user.
RetrievalPricingExternalMode = "external"
)

// MaxTraversalLinks configures the maximum number of links to traverse in a DAG while calculating
// CommP and traversing a DAG with graphsync; invokes a budget on DAG depth and density.
var MaxTraversalLinks uint64 = 32 * (1 << 20)

func init() {
if envMaxTraversal, err := strconv.ParseUint(os.Getenv("LOTUS_MAX_TRAVERSAL_LINKS"), 10, 64); err == nil {
MaxTraversalLinks = envMaxTraversal
}
}

func (b *BatchFeeConfig) FeeForSectors(nSectors int) abi.TokenAmount {
return big.Add(big.Int(b.Base), big.Mul(big.NewInt(int64(nSectors)), big.Int(b.PerSector)))
}
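
The batch fee is linear in the number of sectors: total = Base + nSectors * PerSector. For example, Base = 0 FIL and PerSector = 0.03 FIL price a 10-sector batch at 0.3 FIL. A sketch with hypothetical config values:

fee := BatchFeeConfig{
	Base:      types.MustParseFIL("0"),
	PerSector: types.MustParseFIL("0.03"),
}
total := fee.FeeForSectors(10) // 0.3 FIL, as an abi.TokenAmount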
|
||||
@ -77,8 +55,6 @@ func defCommon() Common {
|
||||
}
|
||||
}
|
||||
|
||||
var DefaultSimultaneousTransfers = uint64(20)
|
||||
|
||||
func DefaultDefaultMaxFee() types.FIL {
|
||||
return types.MustParseFIL("0.07")
|
||||
}
|
||||
@ -90,10 +66,7 @@ func DefaultFullNode() *FullNode {
|
||||
Fees: FeeConfig{
|
||||
DefaultMaxFee: DefaultDefaultMaxFee(),
|
||||
},
|
||||
Client: Client{
|
||||
SimultaneousTransfersForStorage: DefaultSimultaneousTransfers,
|
||||
SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers,
|
||||
},
|
||||
|
||||
Chainstore: Chainstore{
|
||||
EnableSplitstore: true,
|
||||
Splitstore: Splitstore{
|
||||
@ -193,52 +166,13 @@ func DefaultStorageMiner() *StorageMiner {
|
||||
},
|
||||
|
||||
Dealmaking: DealmakingConfig{
|
||||
ConsiderOnlineStorageDeals: true,
|
||||
ConsiderOfflineStorageDeals: true,
|
||||
ConsiderOnlineRetrievalDeals: true,
|
||||
ConsiderOfflineRetrievalDeals: true,
|
||||
ConsiderVerifiedStorageDeals: true,
|
||||
ConsiderUnverifiedStorageDeals: true,
|
||||
PieceCidBlocklist: []cid.Cid{},
|
||||
// TODO: It'd be nice to set this based on sector size
|
||||
MaxDealStartDelay: Duration(time.Hour * 24 * 14),
|
||||
ExpectedSealDuration: Duration(time.Hour * 24),
|
||||
PublishMsgPeriod: Duration(time.Hour),
|
||||
MaxDealsPerPublishMsg: 8,
|
||||
MaxProviderCollateralMultiplier: 2,
|
||||
|
||||
SimultaneousTransfersForStorage: DefaultSimultaneousTransfers,
|
||||
SimultaneousTransfersForStoragePerClient: 0,
|
||||
SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers,
|
||||
|
||||
StartEpochSealingBuffer: 480, // 480 epochs buffer == 4 hours from adding deal to sector to sector being sealed
|
||||
|
||||
RetrievalPricing: &RetrievalPricing{
|
||||
Strategy: RetrievalPricingDefaultMode,
|
||||
Default: &RetrievalPricingDefault{
|
||||
VerifiedDealsFreeTransfer: true,
|
||||
},
|
||||
External: &RetrievalPricingExternal{
|
||||
Path: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
IndexProvider: IndexProviderConfig{
|
||||
Enable: true,
|
||||
EntriesCacheCapacity: 1024,
|
||||
EntriesChunkSize: 16384,
|
||||
// The default empty TopicName means it is inferred from network name, in the following
|
||||
// format: "/indexer/ingest/<network-name>"
|
||||
TopicName: "",
|
||||
PurgeCacheOnStart: false,
|
||||
},
|
||||
|
||||
Subsystems: MinerSubsystemConfig{
|
||||
EnableMining: true,
|
||||
EnableSealing: true,
|
||||
EnableSectorStorage: true,
|
||||
EnableMarkets: false,
|
||||
EnableSectorIndexDB: false,
|
||||
},
|
||||
|
||||
@ -270,12 +204,6 @@ func DefaultStorageMiner() *StorageMiner {
|
||||
DealPublishControl: []string{},
|
||||
},
|
||||
|
||||
DAGStore: DAGStoreConfig{
|
||||
MaxConcurrentIndex: 5,
|
||||
MaxConcurrencyStorageCalls: 100,
|
||||
MaxConcurrentUnseals: 5,
|
||||
GCInterval: Duration(1 * time.Minute),
|
||||
},
|
||||
HarmonyDB: HarmonyDB{
|
||||
Hosts: []string{"127.0.0.1"},
|
||||
Username: "yugabyte",
|
||||
|
||||
@ -79,9 +79,3 @@ func TestDefaultMinerRoundtrip(t *testing.T) {
|
||||
fmt.Println(c2)
|
||||
require.True(t, reflect.DeepEqual(c, c2))
|
||||
}
|
||||
|
||||
func TestDefaultStorageMiner_IsEmpty(t *testing.T) {
|
||||
subject := DefaultStorageMiner()
|
||||
require.True(t, subject.IndexProvider.Enable)
|
||||
require.Equal(t, "", subject.IndexProvider.TopicName)
|
||||
}
|
||||
|
||||
@ -85,30 +85,6 @@ your node if metadata log is disabled`,
Comment: ``,
},
},
"Client": {
{
Name: "SimultaneousTransfersForStorage",
Type: "uint64",

Comment: `The maximum number of simultaneous data transfers between the client
and storage providers for storage deals`,
},
{
Name: "SimultaneousTransfersForRetrieval",
Type: "uint64",

Comment: `The maximum number of simultaneous data transfers between the client
and storage providers for retrieval deals`,
},
{
Name: "OffChainRetrieval",
Type: "bool",

Comment: `Require that retrievals perform no on-chain operations. Paid retrievals
without existing payment channels with available funds will fail instead
of automatically performing on-chain operations.`,
},
},
"Common": {
{
Name: "API",
@ -141,197 +117,516 @@ of automatically performing on-chain operations.`,
Comment: ``,
},
},
"DAGStoreConfig": {
"CurioAddresses": {
{
Name: "RootDir",
Name: "PreCommitControl",
Type: "[]string",

Comment: `Addresses to send PreCommit messages from`,
},
{
Name: "CommitControl",
Type: "[]string",

Comment: `Addresses to send Commit messages from`,
},
{
Name: "TerminateControl",
Type: "[]string",

Comment: ``,
},
{
Name: "DisableOwnerFallback",
Type: "bool",

Comment: `DisableOwnerFallback disables usage of the owner address for messages
sent automatically`,
},
{
Name: "DisableWorkerFallback",
Type: "bool",

Comment: `DisableWorkerFallback disables usage of the worker address for messages
sent automatically, if control addresses are configured.
A control address that doesn't have enough funds will still be chosen
over the worker address if this flag is set.`,
},
{
Name: "MinerAddresses",
Type: "[]string",

Comment: `MinerAddresses are the addresses of the miner actors to use for sending messages`,
},
},
"CurioAlerting": {
{
Name: "PagerDutyEventURL",
Type: "string",

Comment: `Path to the dagstore root directory. This directory contains three
subdirectories, which can be symlinked to alternative locations if
need be:
- ./transients: caches unsealed deals that have been fetched from the
storage subsystem for serving retrievals.
- ./indices: stores shard indices.
- ./datastore: holds the KV store tracking the state of every shard
known to the DAG store.
Default value: <LOTUS_MARKETS_PATH>/dagstore (split deployment) or
<LOTUS_MINER_PATH>/dagstore (monolith deployment)`,
Comment: `PagerDutyEventURL is the URL for the PagerDuty.com Events API v2. Events sent to this API URL are ultimately
routed to a PagerDuty.com service and processed.
The default is sufficient for integration with the stock PagerDuty.com service.`,
},
{
Name: "MaxConcurrentIndex",
Name: "PageDutyIntegrationKey",
Type: "string",

Comment: `PageDutyIntegrationKey is the integration key for a PagerDuty.com service. You can find this unique service
identifier in the integration page for the service.`,
},
{
Name: "MinimumWalletBalance",
Type: "types.FIL",

Comment: `MinimumWalletBalance is the minimum balance for all active wallets. If the balance is below this value, an
alert will be triggered for the wallet`,
},
},
"CurioConfig": {
{
Name: "Subsystems",
Type: "CurioSubsystemsConfig",

Comment: ``,
},
{
Name: "Fees",
Type: "CurioFees",

Comment: ``,
},
{
Name: "Addresses",
Type: "[]CurioAddresses",

Comment: `Addresses of wallets per MinerAddress (one of the fields).`,
},
{
Name: "Proving",
Type: "CurioProvingConfig",

Comment: ``,
},
{
Name: "Ingest",
Type: "CurioIngestConfig",

Comment: ``,
},
{
Name: "Journal",
Type: "JournalConfig",

Comment: ``,
},
{
Name: "Apis",
Type: "ApisConfig",

Comment: ``,
},
{
Name: "Alerting",
Type: "CurioAlerting",

Comment: ``,
},
},
"CurioFees": {
{
Name: "DefaultMaxFee",
Type: "types.FIL",

Comment: ``,
},
{
Name: "MaxPreCommitGasFee",
Type: "types.FIL",

Comment: ``,
},
{
Name: "MaxCommitGasFee",
Type: "types.FIL",

Comment: ``,
},
{
Name: "MaxPreCommitBatchGasFee",
Type: "BatchFeeConfig",

Comment: `maxBatchFee = maxBase + maxPerSector * nSectors`,
},
{
Name: "MaxCommitBatchGasFee",
Type: "BatchFeeConfig",

Comment: ``,
},
{
Name: "MaxTerminateGasFee",
Type: "types.FIL",

Comment: ``,
},
{
Name: "MaxWindowPoStGasFee",
Type: "types.FIL",

Comment: `WindowPoSt is a high-value operation, so the default fee should be high.`,
},
{
Name: "MaxPublishDealsFee",
Type: "types.FIL",

Comment: ``,
},
},
"CurioIngestConfig": {
{
Name: "MaxQueueSDR",
Type: "int",

Comment: `The maximum amount of indexing jobs that can run simultaneously.
0 means unlimited.
Default value: 5.`,
Comment: `Maximum number of sectors that can be queued waiting for SDR to start processing.
0 = unlimited
Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
The SDR queue includes deals which are in the process of entering the sealing pipeline - size of this queue
will also impact the maximum number of ParkPiece tasks which can run concurrently.

SDR queue is the first queue in the sealing pipeline, meaning that it should be used as the primary backpressure mechanism.`,
},
{
Name: "MaxConcurrentReadyFetches",
Name: "MaxQueueTrees",
Type: "int",

Comment: `The maximum amount of unsealed deals that can be fetched simultaneously
from the storage subsystem. 0 means unlimited.
Default value: 0 (unlimited).`,
Comment: `Maximum number of sectors that can be queued waiting for SDRTrees to start processing.
0 = unlimited
Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
In case of the trees tasks it is possible that this queue grows more than this limit, the backpressure is only
applied to sectors entering the pipeline.`,
},
{
Name: "MaxConcurrentUnseals",
Name: "MaxQueuePoRep",
Type: "int",

Comment: `The maximum amount of unseals that can be processed simultaneously
from the storage subsystem. 0 means unlimited.
Default value: 0 (unlimited).`,
Comment: `Maximum number of sectors that can be queued waiting for PoRep to start processing.
0 = unlimited
Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
Like with the trees tasks, it is possible that this queue grows more than this limit, the backpressure is only
applied to sectors entering the pipeline.`,
},
},
"CurioProvingConfig": {
{
Name: "MaxConcurrencyStorageCalls",
Name: "ParallelCheckLimit",
Type: "int",

Comment: `The maximum number of simultaneous inflight API calls to the storage
subsystem.
Default value: 100.`,
Comment: `Maximum number of sector checks to run in parallel. (0 = unlimited)

WARNING: Setting this value too high may make the node crash by running out of stack
WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
to late submission.

After changing this option, confirm that the new value works in your setup by invoking
'lotus-miner proving compute window-post 0'`,
},
{
Name: "GCInterval",
Name: "SingleCheckTimeout",
Type: "Duration",

Comment: `The time between calls to periodic dagstore GC, in time.Duration string
representation, e.g. 1m, 5m, 1h.
Default value: 1 minute.`,
Comment: `Maximum amount of time a proving pre-check can take for a sector. If the check times out the sector will be skipped

WARNING: Setting this value too low risks sectors being skipped even though they are accessible, just reading the
test challenge took longer than this timeout
WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are
blocked (e.g. in case of disconnected NFS mount)`,
},
{
Name: "PartitionCheckTimeout",
Type: "Duration",

Comment: `Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
the partition which didn't get checked on time will be skipped

WARNING: Setting this value too low risks sectors being skipped even though they are accessible, just reading the
test challenge took longer than this timeout
WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are
blocked or slow`,
},
{
Name: "DisableWDPoStPreChecks",
Type: "bool",

Comment: `Disable WindowPoSt provable sector readability checks.

In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
we're only interested in checking that sector data can be read.

When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process
can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by
the builtin logic not skipping snark computation when some sectors need to be skipped.

When using PoSt workers, this process is mostly redundant, with PoSt workers challenges will be read once, and
if challenges for some sectors aren't readable, those sectors will just get skipped.

Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter
time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
be negligible.

NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers.

NOTE: Even when this option is enabled, recovering sectors will be checked before recovery declaration message is
sent to the chain

After changing this option, confirm that the new value works in your setup by invoking
'lotus-miner proving compute window-post 0'`,
},
{
Name: "MaxPartitionsPerPoStMessage",
Type: "int",

Comment: `Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (3 in nv21)

A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.

Note that setting this value lower may result in less efficient gas use - more messages will be sent,
to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)

Setting this value above the network limit has no effect`,
},
{
Name: "MaxPartitionsPerRecoveryMessage",
Type: "int",

Comment: `In some cases when submitting DeclareFaultsRecovered messages,
there may be too many recoveries to fit in a BlockGasLimit.
In those cases it may be necessary to set this value to something low (eg 1);
Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
resulting in more total gas use (but each message will have lower gas limit)`,
},
{
Name: "SingleRecoveringPartitionPerPostMessage",
Type: "bool",

Comment: `Enable single partition per PoSt Message for partitions containing recovery sectors

In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
with recovering sectors in the post message

Note that setting this value lower may result in less efficient gas use - more messages will be sent,
to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)`,
},
},
"CurioSubsystemsConfig": {
|
||||
{
|
||||
Name: "EnableWindowPost",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `EnableWindowPost enables window post to be executed on this curio instance. Each machine in the cluster
|
||||
with WindowPoSt enabled will also participate in the window post scheduler. It is possible to have multiple
|
||||
machines with WindowPoSt enabled which will provide redundancy, and in case of multiple partitions per deadline,
|
||||
will allow for parallel processing of partitions.
|
||||
|
||||
It is possible to have instances handling both WindowPoSt and WinningPoSt, which can provide redundancy without
|
||||
the need for additional machines. In setups like this it is generally recommended to run
|
||||
partitionsPerDeadline+1 machines.`,
|
||||
},
|
||||
{
|
||||
Name: "WindowPostMaxTasks",
|
||||
Type: "int",
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
{
|
||||
Name: "EnableWinningPost",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `EnableWinningPost enables winning post to be executed on this curio instance.
|
||||
Each machine in the cluster with WinningPoSt enabled will also participate in the winning post scheduler.
|
||||
It is possible to mix machines with WindowPoSt and WinningPoSt enabled, for details see the EnableWindowPost
|
||||
documentation.`,
|
||||
},
|
||||
{
|
||||
Name: "WinningPostMaxTasks",
|
||||
Type: "int",
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
{
|
||||
Name: "EnableParkPiece",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `EnableParkPiece enables the "piece parking" task to run on this node. This task is responsible for fetching
|
||||
pieces from the network and storing them in the storage subsystem until sectors are sealed. This task is
|
||||
only applicable when integrating with boost, and should be enabled on nodes which will hold deal data
|
||||
from boost until sectors containing the related pieces have the TreeD/TreeR constructed.
|
||||
Note that future Curio implementations will have a separate task type for fetching pieces from the internet.`,
|
||||
},
|
||||
{
|
||||
Name: "ParkPieceMaxTasks",
|
||||
Type: "int",
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
{
|
||||
Name: "EnableSealSDR",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `EnableSealSDR enables SDR tasks to run. SDR is the long sequential computation
|
||||
creating 11 layer files in sector cache directory.
|
||||
|
||||
SDR is the first task in the sealing pipeline. It's inputs are just the hash of the
|
||||
unsealed data (CommD), sector number, miner id, and the seal proof type.
|
||||
It's outputs are the 11 layer files in the sector cache directory.
|
||||
|
||||
In lotus-miner this was run as part of PreCommit1.`,
|
||||
},
|
||||
{
|
||||
Name: "SealSDRMaxTasks",
|
||||
Type: "int",
|
||||
|
||||
Comment: `The maximum amount of SDR tasks that can run simultaneously. Note that the maximum number of tasks will
|
||||
also be bounded by resources available on the machine.`,
|
||||
},
|
||||
{
|
||||
Name: "EnableSealSDRTrees",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `EnableSealSDRTrees enables the SDR pipeline tree-building task to run.
|
||||
This task handles encoding of unsealed data into last sdr layer and building
|
||||
of TreeR, TreeC and TreeD.
|
||||
|
||||
This task runs after SDR
|
||||
TreeD is first computed with optional input of unsealed data
|
||||
TreeR is computed from replica, which is first computed as field
|
||||
addition of the last SDR layer and the bottom layer of TreeD (which is the unsealed data)
|
||||
TreeC is computed from the 11 SDR layers
|
||||
The 3 trees will later be used to compute the PoRep proof.
|
||||
|
||||
In case of SyntheticPoRep challenges for PoRep will be pre-generated at this step, and trees and layers
|
||||
will be dropped. SyntheticPoRep works by pre-generating a very large set of challenges (~30GiB on disk)
|
||||
then using a small subset of them for the actual PoRep computation. This allows for significant scratch space
|
||||
saving between PreCommit and PoRep generation at the expense of more computation (generating challenges in this step)
|
||||
|
||||
In lotus-miner this was run as part of PreCommit2 (TreeD was run in PreCommit1).
|
||||
Note that nodes with SDRTrees enabled will also answer to Finalize tasks,
|
||||
which just remove unneeded tree data after PoRep is computed.`,
|
||||
},
|
||||
{
|
||||
Name: "SealSDRTreesMaxTasks",
|
||||
Type: "int",
|
||||
|
||||
Comment: `The maximum amount of SealSDRTrees tasks that can run simultaneously. Note that the maximum number of tasks will
|
||||
also be bounded by resources available on the machine.`,
|
||||
},
|
||||
{
|
||||
Name: "FinalizeMaxTasks",
|
||||
Type: "int",
|
||||
|
||||
Comment: `FinalizeMaxTasks is the maximum amount of finalize tasks that can run simultaneously.
|
||||
The finalize task is enabled on all machines which also handle SDRTrees tasks. Finalize ALWAYS runs on whichever
|
||||
machine holds sector cache files, as it removes unneeded tree data after PoRep is computed.
|
||||
Finalize will run in parallel with the SubmitCommitMsg task.`,
|
||||
},
|
||||
{
|
||||
Name: "EnableSendPrecommitMsg",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `EnableSendPrecommitMsg enables the sending of precommit messages to the chain
|
||||
from this curio instance.
|
||||
This runs after SDRTrees and uses the output CommD / CommR (roots of TreeD / TreeR) for the message`,
|
||||
},
|
||||
{
|
||||
Name: "EnablePoRepProof",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `EnablePoRepProof enables the computation of the porep proof
|
||||
|
||||
This task runs after interactive-porep seed becomes available, which happens 150 epochs (75min) after the
|
||||
precommit message lands on chain. This task should run on a machine with a GPU. Vanilla PoRep proofs are
|
||||
requested from the machine which holds sector cache files which most likely is the machine which ran the SDRTrees
|
||||
task.
|
||||
|
||||
In lotus-miner this was Commit1 / Commit2`,
|
||||
},
|
||||
{
|
||||
Name: "PoRepProofMaxTasks",
|
||||
Type: "int",
|
||||
|
||||
Comment: `The maximum amount of PoRepProof tasks that can run simultaneously. Note that the maximum number of tasks will
|
||||
also be bounded by resources available on the machine.`,
|
||||
},
|
||||
{
|
||||
Name: "EnableSendCommitMsg",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `EnableSendCommitMsg enables the sending of commit messages to the chain
|
||||
from this curio instance.`,
|
||||
},
|
||||
{
|
||||
Name: "EnableMoveStorage",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `EnableMoveStorage enables the move-into-long-term-storage task to run on this curio instance.
|
||||
This tasks should only be enabled on nodes with long-term storage.
|
||||
|
||||
The MoveStorage task is the last task in the sealing pipeline. It moves the sealed sector data from the
|
||||
SDRTrees machine into long-term storage. This task runs after the Finalize task.`,
|
||||
},
|
||||
{
|
||||
Name: "MoveStorageMaxTasks",
|
||||
Type: "int",
|
||||
|
||||
Comment: `The maximum amount of MoveStorage tasks that can run simultaneously. Note that the maximum number of tasks will
|
||||
also be bounded by resources available on the machine. It is recommended that this value is set to a number which
|
||||
uses all available network (or disk) bandwidth on the machine without causing bottlenecks.`,
|
||||
},
|
||||
{
|
||||
Name: "BoostAdapters",
|
||||
Type: "[]string",
|
||||
|
||||
Comment: `BoostAdapters is a list of tuples of miner address and port/ip to listen for market (e.g. boost) requests.
|
||||
This interface is compatible with the lotus-miner RPC, implementing a subset needed for storage market operations.
|
||||
Strings should be in the format "actor:ip:port". IP cannot be 0.0.0.0. We recommend using a private IP.
|
||||
Example: "f0123:127.0.0.1:32100". Multiple addresses can be specified.
|
||||
|
||||
When a market node like boost gives Curio's market RPC a deal to placing into a sector, Curio will first store the
|
||||
deal data in a temporary location "Piece Park" before assigning it to a sector. This requires that at least one
|
||||
node in the cluster has the EnableParkPiece option enabled and has sufficient scratch space to store the deal data.
|
||||
This is different from lotus-miner which stored the deal data into an "unsealed" sector as soon as the deal was
|
||||
received. Deal data in PiecePark is accessed when the sector TreeD and TreeR are computed, but isn't needed for
|
||||
the initial SDR layers computation. Pieces in PiecePark are removed after all sectors referencing the piece are
|
||||
sealed.
|
||||
|
||||
To get API info for boost configuration run 'curio market rpc-info'
|
||||
|
||||
NOTE: All deal data will flow through this service, so it should be placed on a machine running boost or on
|
||||
a machine which handles ParkPiece tasks.`,
|
||||
},
|
||||
{
|
||||
Name: "EnableWebGui",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `EnableWebGui enables the web GUI on this curio instance. The UI has minimal local overhead, but it should
|
||||
only need to be run on a single machine in the cluster.`,
|
||||
},
|
||||
{
|
||||
Name: "GuiAddress",
|
||||
Type: "string",
|
||||
|
||||
Comment: `The address that should listen for Web GUI requests.`,
|
||||
},
|
||||
},
|
||||
"DealmakingConfig": {
|
||||
{
|
||||
Name: "ConsiderOnlineStorageDeals",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `When enabled, the miner can accept online deals`,
|
||||
},
|
||||
{
|
||||
Name: "ConsiderOfflineStorageDeals",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `When enabled, the miner can accept offline deals`,
|
||||
},
|
||||
{
|
||||
Name: "ConsiderOnlineRetrievalDeals",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `When enabled, the miner can accept retrieval deals`,
|
||||
},
|
||||
{
|
||||
Name: "ConsiderOfflineRetrievalDeals",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `When enabled, the miner can accept offline retrieval deals`,
|
||||
},
|
||||
{
|
||||
Name: "ConsiderVerifiedStorageDeals",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `When enabled, the miner can accept verified deals`,
|
||||
},
|
||||
{
|
||||
Name: "ConsiderUnverifiedStorageDeals",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `When enabled, the miner can accept unverified deals`,
|
||||
},
|
||||
{
|
||||
Name: "PieceCidBlocklist",
|
||||
Type: "[]cid.Cid",
|
||||
|
||||
Comment: `A list of Data CIDs to reject when making deals`,
|
||||
},
|
||||
{
|
||||
Name: "ExpectedSealDuration",
|
||||
Type: "Duration",
|
||||
|
||||
Comment: `Maximum expected amount of time getting the deal into a sealed sector will take
|
||||
This includes the time the deal will need to get transferred and published
|
||||
before being assigned to a sector`,
|
||||
},
|
||||
{
|
||||
Name: "MaxDealStartDelay",
|
||||
Type: "Duration",
|
||||
|
||||
Comment: `Maximum amount of time proposed deal StartEpoch can be in future`,
|
||||
},
|
||||
{
|
||||
Name: "PublishMsgPeriod",
|
||||
Type: "Duration",
|
||||
|
||||
Comment: `When a deal is ready to publish, the amount of time to wait for more
|
||||
deals to be ready to publish before publishing them all as a batch`,
|
||||
},
|
||||
{
|
||||
Name: "MaxDealsPerPublishMsg",
|
||||
Type: "uint64",
|
||||
|
||||
Comment: `The maximum number of deals to include in a single PublishStorageDeals
|
||||
message`,
|
||||
},
|
||||
{
|
||||
Name: "MaxProviderCollateralMultiplier",
|
||||
Type: "uint64",
|
||||
|
||||
Comment: `The maximum collateral that the provider will put up against a deal,
|
||||
as a multiplier of the minimum collateral bound`,
|
||||
},
|
||||
{
|
||||
Name: "MaxStagingDealsBytes",
|
||||
Type: "int64",
|
||||
|
||||
Comment: `The maximum allowed disk usage size in bytes of staging deals not yet
|
||||
passed to the sealing node by the markets service. 0 is unlimited.`,
|
||||
},
|
||||
{
|
||||
Name: "SimultaneousTransfersForStorage",
|
||||
Type: "uint64",
|
||||
|
||||
Comment: `The maximum number of parallel online data transfers for storage deals`,
|
||||
},
|
||||
{
|
||||
Name: "SimultaneousTransfersForStoragePerClient",
|
||||
Type: "uint64",
|
||||
|
||||
Comment: `The maximum number of simultaneous data transfers from any single client
|
||||
for storage deals.
|
||||
Unset by default (0), and values higher than SimultaneousTransfersForStorage
|
||||
will have no effect; i.e. the total number of simultaneous data transfers
|
||||
across all storage clients is bound by SimultaneousTransfersForStorage
|
||||
regardless of this number.`,
|
||||
},
|
||||
{
|
||||
Name: "SimultaneousTransfersForRetrieval",
|
||||
Type: "uint64",
|
||||
|
||||
Comment: `The maximum number of parallel online data transfers for retrieval deals`,
|
||||
},
|
||||
{
|
||||
Name: "StartEpochSealingBuffer",
|
||||
Type: "uint64",
|
||||
|
||||
Comment: `Minimum start epoch buffer to give time for sealing of sector with deal.`,
|
||||
},
|
||||
{
|
||||
Name: "Filter",
|
||||
Type: "string",
|
||||
|
||||
Comment: `A command used for fine-grained evaluation of storage deals
|
||||
see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details`,
|
||||
},
|
||||
{
|
||||
Name: "RetrievalFilter",
|
||||
Type: "string",
|
||||
|
||||
Comment: `A command used for fine-grained evaluation of retrieval deals
|
||||
see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details`,
|
||||
},
|
||||
{
|
||||
Name: "RetrievalPricing",
|
||||
Type: "*RetrievalPricing",
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
},
|
||||
"EventsConfig": {
|
||||
{
|
||||
@ -454,12 +749,6 @@ Set to 0 to keep all mappings`,
|
||||
},
|
||||
},
|
||||
"FullNode": {
|
||||
{
|
||||
Name: "Client",
|
||||
Type: "Client",
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
{
|
||||
Name: "Wallet",
|
||||
Type: "Wallet",
|
||||
@ -545,51 +834,6 @@ in a cluster. Only 1 is required`,
|
||||
EnableMsgIndex enables indexing of messages on chain.`,
|
||||
},
|
||||
},
|
||||
"IndexProviderConfig": {
|
||||
{
|
||||
Name: "Enable",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `Enable set whether to enable indexing announcement to the network and expose endpoints that
|
||||
allow indexer nodes to process announcements. Enabled by default.`,
|
||||
},
|
||||
{
|
||||
Name: "EntriesCacheCapacity",
|
||||
Type: "int",
|
||||
|
||||
Comment: `EntriesCacheCapacity sets the maximum capacity to use for caching the indexing advertisement
|
||||
entries. Defaults to 1024 if not specified. The cache is evicted using LRU policy. The
|
||||
maximum storage used by the cache is a factor of EntriesCacheCapacity, EntriesChunkSize and
|
||||
the length of multihashes being advertised. For example, advertising 128-bit long multihashes
|
||||
with the default EntriesCacheCapacity, and EntriesChunkSize means the cache size can grow to
|
||||
256MiB when full.`,
|
||||
},
|
||||
{
|
||||
Name: "EntriesChunkSize",
|
||||
Type: "int",
|
||||
|
||||
Comment: `EntriesChunkSize sets the maximum number of multihashes to include in a single entries chunk.
|
||||
Defaults to 16384 if not specified. Note that chunks are chained together for indexing
|
||||
advertisements that include more multihashes than the configured EntriesChunkSize.`,
|
||||
},
|
||||
{
|
||||
Name: "TopicName",
|
||||
Type: "string",
|
||||
|
||||
Comment: `TopicName sets the topic name on which the changes to the advertised content are announced.
|
||||
If not explicitly specified, the topic name is automatically inferred from the network name
|
||||
in following format: '/indexer/ingest/<network-name>'
|
||||
Defaults to empty, which implies the topic name is inferred from network name.`,
|
||||
},
|
||||
{
|
||||
Name: "PurgeCacheOnStart",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `PurgeCacheOnStart sets whether to clear any cached entries chunks when the provider engine
|
||||
starts. By default, the cache is rehydrated from previously cached entries stored in
|
||||
datastore if any is present.`,
|
||||
},
|
||||
},
|
||||
"JournalConfig": {
|
||||
{
|
||||
Name: "DisabledEvents",
|
||||
@ -790,12 +1034,6 @@ over the worker address if this flag is set.`,
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
{
|
||||
Name: "EnableMarkets",
|
||||
Type: "bool",
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
{
|
||||
Name: "EnableSectorIndexDB",
|
||||
Type: "bool",
|
||||
@ -1026,46 +1264,6 @@ This property is used only if ElasticSearchTracer propery is set.`,
|
||||
Comment: `Auth token that will be passed with logs to elasticsearch - used for weighted peers score.`,
|
||||
},
|
||||
},
|
||||
"RetrievalPricing": {
|
||||
{
|
||||
Name: "Strategy",
|
||||
Type: "string",
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
{
|
||||
Name: "Default",
|
||||
Type: "*RetrievalPricingDefault",
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
{
|
||||
Name: "External",
|
||||
Type: "*RetrievalPricingExternal",
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
},
|
||||
"RetrievalPricingDefault": {
|
||||
{
|
||||
Name: "VerifiedDealsFreeTransfer",
|
||||
Type: "bool",
|
||||
|
||||
Comment: `VerifiedDealsFreeTransfer configures zero fees for data transfer for a retrieval deal
|
||||
of a payloadCid that belongs to a verified storage deal.
|
||||
This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "default".
|
||||
default value is true`,
|
||||
},
|
||||
},
|
||||
"RetrievalPricingExternal": {
|
||||
{
|
||||
Name: "Path",
|
||||
Type: "string",
|
||||
|
||||
Comment: `Path of the external script that will be run to price a retrieval deal.
|
||||
This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "external".`,
|
||||
},
|
||||
},
|
||||
"SealerConfig": {
|
||||
{
|
||||
Name: "ParallelFetchLimit",
|
||||
@ -1483,12 +1681,6 @@ HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer`,
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
{
|
||||
Name: "IndexProvider",
|
||||
Type: "IndexProviderConfig",
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
{
|
||||
Name: "Proving",
|
||||
Type: "ProvingConfig",
|
||||
@ -1519,12 +1711,6 @@ HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer`,
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
{
|
||||
Name: "DAGStore",
|
||||
Type: "DAGStoreConfig",
|
||||
|
||||
Comment: ``,
|
||||
},
|
||||
{
|
||||
Name: "HarmonyDB",
|
||||
Type: "HarmonyDB",
|
||||
|
||||
@ -1,8 +1,6 @@
package config

import (
"github.com/ipfs/go-cid"

"github.com/filecoin-project/lotus/chain/types"
)

@ -22,7 +20,6 @@ type Common struct {
// FullNode is a full node config
type FullNode struct {
Common
Client Client
Wallet Wallet
Fees FeeConfig
Chainstore Chainstore
@ -53,17 +50,28 @@ type Logging struct {
type StorageMiner struct {
Common

Subsystems MinerSubsystemConfig
Dealmaking DealmakingConfig
IndexProvider IndexProviderConfig
Proving ProvingConfig
Sealing SealingConfig
Storage SealerConfig
Fees MinerFeeConfig
Addresses MinerAddressConfig
DAGStore DAGStoreConfig
Subsystems MinerSubsystemConfig
Dealmaking DealmakingConfig
Proving ProvingConfig
Sealing SealingConfig
Storage SealerConfig
Fees MinerFeeConfig
Addresses MinerAddressConfig
HarmonyDB HarmonyDB
}

HarmonyDB HarmonyDB
type CurioConfig struct {
Subsystems CurioSubsystemsConfig

Fees CurioFees

// Addresses of wallets per MinerAddress (one of the fields).
Addresses []CurioAddresses
Proving CurioProvingConfig
Ingest CurioIngestConfig
Journal JournalConfig
Apis ApisConfig
Alerting CurioAlerting
}

type ApisConfig struct {
@ -81,50 +89,144 @@ type JournalConfig struct {
DisabledEvents string
}

type DAGStoreConfig struct {
// Path to the dagstore root directory. This directory contains three
// subdirectories, which can be symlinked to alternative locations if
// need be:
// - ./transients: caches unsealed deals that have been fetched from the
// storage subsystem for serving retrievals.
// - ./indices: stores shard indices.
// - ./datastore: holds the KV store tracking the state of every shard
// known to the DAG store.
// Default value: <LOTUS_MARKETS_PATH>/dagstore (split deployment) or
// <LOTUS_MINER_PATH>/dagstore (monolith deployment)
RootDir string
type CurioSubsystemsConfig struct {
// EnableWindowPost enables window post to be executed on this curio instance. Each machine in the cluster
// with WindowPoSt enabled will also participate in the window post scheduler. It is possible to have multiple
// machines with WindowPoSt enabled which will provide redundancy, and in case of multiple partitions per deadline,
// will allow for parallel processing of partitions.
//
// It is possible to have instances handling both WindowPoSt and WinningPoSt, which can provide redundancy without
// the need for additional machines. In setups like this it is generally recommended to run
// partitionsPerDeadline+1 machines.
EnableWindowPost bool
WindowPostMaxTasks int

// The maximum amount of indexing jobs that can run simultaneously.
// 0 means unlimited.
// Default value: 5.
MaxConcurrentIndex int
// EnableWinningPost enables winning post to be executed on this curio instance.
// Each machine in the cluster with WinningPoSt enabled will also participate in the winning post scheduler.
// It is possible to mix machines with WindowPoSt and WinningPoSt enabled, for details see the EnableWindowPost
// documentation.
EnableWinningPost bool
WinningPostMaxTasks int

// The maximum amount of unsealed deals that can be fetched simultaneously
// from the storage subsystem. 0 means unlimited.
// Default value: 0 (unlimited).
MaxConcurrentReadyFetches int
// EnableParkPiece enables the "piece parking" task to run on this node. This task is responsible for fetching
// pieces from the network and storing them in the storage subsystem until sectors are sealed. This task is
// only applicable when integrating with boost, and should be enabled on nodes which will hold deal data
// from boost until sectors containing the related pieces have the TreeD/TreeR constructed.
// Note that future Curio implementations will have a separate task type for fetching pieces from the internet.
EnableParkPiece bool
ParkPieceMaxTasks int

// The maximum amount of unseals that can be processed simultaneously
// from the storage subsystem. 0 means unlimited.
// Default value: 0 (unlimited).
MaxConcurrentUnseals int
// EnableSealSDR enables SDR tasks to run. SDR is the long sequential computation
// creating 11 layer files in sector cache directory.
//
// SDR is the first task in the sealing pipeline. Its inputs are just the hash of the
// unsealed data (CommD), sector number, miner id, and the seal proof type.
// Its outputs are the 11 layer files in the sector cache directory.
//
// In lotus-miner this was run as part of PreCommit1.
EnableSealSDR bool

// The maximum number of simultaneous inflight API calls to the storage
// subsystem.
// Default value: 100.
MaxConcurrencyStorageCalls int
// The maximum amount of SDR tasks that can run simultaneously. Note that the maximum number of tasks will
// also be bounded by resources available on the machine.
SealSDRMaxTasks int

// The time between calls to periodic dagstore GC, in time.Duration string
// representation, e.g. 1m, 5m, 1h.
// Default value: 1 minute.
GCInterval Duration
// EnableSealSDRTrees enables the SDR pipeline tree-building task to run.
// This task handles encoding of unsealed data into the last SDR layer and building
// of TreeR, TreeC and TreeD.
//
// This task runs after SDR
// TreeD is first computed with optional input of unsealed data
// TreeR is computed from replica, which is first computed as field
// addition of the last SDR layer and the bottom layer of TreeD (which is the unsealed data)
// TreeC is computed from the 11 SDR layers
// The 3 trees will later be used to compute the PoRep proof.
//
// In case of SyntheticPoRep, challenges for PoRep will be pre-generated at this step, and trees and layers
// will be dropped. SyntheticPoRep works by pre-generating a very large set of challenges (~30GiB on disk)
// then using a small subset of them for the actual PoRep computation. This allows for significant scratch space
// saving between PreCommit and PoRep generation at the expense of more computation (generating challenges in this step)
//
// In lotus-miner this was run as part of PreCommit2 (TreeD was run in PreCommit1).
// Note that nodes with SDRTrees enabled will also answer to Finalize tasks,
// which just remove unneeded tree data after PoRep is computed.
EnableSealSDRTrees bool

// The maximum amount of SealSDRTrees tasks that can run simultaneously. Note that the maximum number of tasks will
// also be bounded by resources available on the machine.
SealSDRTreesMaxTasks int

// FinalizeMaxTasks is the maximum amount of finalize tasks that can run simultaneously.
// The finalize task is enabled on all machines which also handle SDRTrees tasks. Finalize ALWAYS runs on whichever
// machine holds sector cache files, as it removes unneeded tree data after PoRep is computed.
// Finalize will run in parallel with the SubmitCommitMsg task.
FinalizeMaxTasks int

// EnableSendPrecommitMsg enables the sending of precommit messages to the chain
// from this curio instance.
// This runs after SDRTrees and uses the output CommD / CommR (roots of TreeD / TreeR) for the message
EnableSendPrecommitMsg bool

// EnablePoRepProof enables the computation of the porep proof
//
// This task runs after interactive-porep seed becomes available, which happens 150 epochs (75min) after the
// precommit message lands on chain. This task should run on a machine with a GPU. Vanilla PoRep proofs are
// requested from the machine which holds sector cache files which most likely is the machine which ran the SDRTrees
// task.
//
// In lotus-miner this was Commit1 / Commit2
EnablePoRepProof bool

// The maximum amount of PoRepProof tasks that can run simultaneously. Note that the maximum number of tasks will
// also be bounded by resources available on the machine.
PoRepProofMaxTasks int

// EnableSendCommitMsg enables the sending of commit messages to the chain
// from this curio instance.
EnableSendCommitMsg bool

// EnableMoveStorage enables the move-into-long-term-storage task to run on this curio instance.
// This task should only be enabled on nodes with long-term storage.
//
// The MoveStorage task is the last task in the sealing pipeline. It moves the sealed sector data from the
// SDRTrees machine into long-term storage. This task runs after the Finalize task.
EnableMoveStorage bool

// The maximum amount of MoveStorage tasks that can run simultaneously. Note that the maximum number of tasks will
// also be bounded by resources available on the machine. It is recommended that this value is set to a number which
// uses all available network (or disk) bandwidth on the machine without causing bottlenecks.
MoveStorageMaxTasks int

// BoostAdapters is a list of tuples of miner address and port/ip to listen for market (e.g. boost) requests.
// This interface is compatible with the lotus-miner RPC, implementing a subset needed for storage market operations.
// Strings should be in the format "actor:ip:port". IP cannot be 0.0.0.0. We recommend using a private IP.
// Example: "f0123:127.0.0.1:32100". Multiple addresses can be specified.
//
// When a market node like boost gives Curio's market RPC a deal to place into a sector, Curio will first store the
// deal data in a temporary location "Piece Park" before assigning it to a sector. This requires that at least one
// node in the cluster has the EnableParkPiece option enabled and has sufficient scratch space to store the deal data.
// This is different from lotus-miner which stored the deal data into an "unsealed" sector as soon as the deal was
// received. Deal data in PiecePark is accessed when the sector TreeD and TreeR are computed, but isn't needed for
// the initial SDR layers computation. Pieces in PiecePark are removed after all sectors referencing the piece are
// sealed.
//
// To get API info for boost configuration run 'curio market rpc-info'
//
// NOTE: All deal data will flow through this service, so it should be placed on a machine running boost or on
// a machine which handles ParkPiece tasks.
BoostAdapters []string

// EnableWebGui enables the web GUI on this curio instance. The UI has minimal local overhead, but it should
// only need to be run on a single machine in the cluster.
EnableWebGui bool

// The address that should listen for Web GUI requests.
GuiAddress string
}

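The BoostAdapters tuple format documented above ("actor:ip:port") can be validated with a few lines of standard-library Go. This is a hypothetical sketch of such validation, not Curio's actual parsing code:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// parseBoostAdapter is a hypothetical helper illustrating the
// "actor:ip:port" format documented for BoostAdapters above.
func parseBoostAdapter(s string) (actor, hostport string, err error) {
	parts := strings.SplitN(s, ":", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("expected actor:ip:port, got %q", s)
	}
	actor, hostport = parts[0], parts[1]
	host, _, err := net.SplitHostPort(hostport)
	if err != nil {
		return "", "", err
	}
	if host == "0.0.0.0" { // disallowed per the comment above
		return "", "", fmt.Errorf("IP cannot be 0.0.0.0")
	}
	return actor, hostport, nil
}

func main() {
	fmt.Println(parseBoostAdapter("f0123:127.0.0.1:32100")) // f0123 127.0.0.1:32100 <nil>
}
```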
type MinerSubsystemConfig struct {
EnableMining bool
EnableSealing bool
EnableSectorStorage bool
EnableMarkets bool

// When enabled, the sector index will reside in an external database
// as opposed to the local KV store in the miner process
@ -155,111 +257,8 @@ type MinerSubsystemConfig struct {
}

type DealmakingConfig struct {
// When enabled, the miner can accept online deals
ConsiderOnlineStorageDeals bool
// When enabled, the miner can accept offline deals
ConsiderOfflineStorageDeals bool
// When enabled, the miner can accept retrieval deals
ConsiderOnlineRetrievalDeals bool
// When enabled, the miner can accept offline retrieval deals
ConsiderOfflineRetrievalDeals bool
// When enabled, the miner can accept verified deals
ConsiderVerifiedStorageDeals bool
// When enabled, the miner can accept unverified deals
ConsiderUnverifiedStorageDeals bool
// A list of Data CIDs to reject when making deals
PieceCidBlocklist []cid.Cid
// Maximum expected amount of time getting the deal into a sealed sector will take
// This includes the time the deal will need to get transferred and published
// before being assigned to a sector
ExpectedSealDuration Duration
// Maximum amount of time proposed deal StartEpoch can be in future
MaxDealStartDelay Duration
// When a deal is ready to publish, the amount of time to wait for more
// deals to be ready to publish before publishing them all as a batch
PublishMsgPeriod Duration
// The maximum number of deals to include in a single PublishStorageDeals
// message
MaxDealsPerPublishMsg uint64
// The maximum collateral that the provider will put up against a deal,
// as a multiplier of the minimum collateral bound
MaxProviderCollateralMultiplier uint64
// The maximum allowed disk usage size in bytes of staging deals not yet
// passed to the sealing node by the markets service. 0 is unlimited.
MaxStagingDealsBytes int64
// The maximum number of parallel online data transfers for storage deals
SimultaneousTransfersForStorage uint64
// The maximum number of simultaneous data transfers from any single client
// for storage deals.
// Unset by default (0), and values higher than SimultaneousTransfersForStorage
// will have no effect; i.e. the total number of simultaneous data transfers
// across all storage clients is bound by SimultaneousTransfersForStorage
// regardless of this number.
SimultaneousTransfersForStoragePerClient uint64
// The maximum number of parallel online data transfers for retrieval deals
SimultaneousTransfersForRetrieval uint64
// Minimum start epoch buffer to give time for sealing of sector with deal.
StartEpochSealingBuffer uint64

// A command used for fine-grained evaluation of storage deals
// see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details
Filter string
// A command used for fine-grained evaluation of retrieval deals
// see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#using-filters-for-fine-grained-storage-and-retrieval-deal-acceptance for more details
RetrievalFilter string

RetrievalPricing *RetrievalPricing
}

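The documented interaction between SimultaneousTransfersForStorage and SimultaneousTransfersForStoragePerClient (global cap always binds; per-client cap of 0 means unset) can be pictured with a toy limiter. This is an invented illustration, not the markets code this commit removes, and it is not goroutine-safe:

```go
package main

import "fmt"

// transferGate is a toy model of the two limits described above.
// Real data-transfer code would need locking and release paths.
type transferGate struct {
	global    chan struct{}            // capacity = SimultaneousTransfersForStorage
	perClient map[string]chan struct{} // capacity = per-client limit
	clientCap uint64
}

func newTransferGate(globalCap, clientCap uint64) *transferGate {
	return &transferGate{
		global:    make(chan struct{}, globalCap),
		perClient: make(map[string]chan struct{}),
		clientCap: clientCap,
	}
}

// tryAcquire reports whether a new transfer for the given client may start.
func (g *transferGate) tryAcquire(client string) bool {
	select {
	case g.global <- struct{}{}:
	default:
		return false // global cap reached, regardless of per-client setting
	}
	if g.clientCap == 0 {
		return true // per-client limit unset (0), only the global cap applies
	}
	sem, ok := g.perClient[client]
	if !ok {
		sem = make(chan struct{}, g.clientCap)
		g.perClient[client] = sem
	}
	select {
	case sem <- struct{}{}:
		return true
	default:
		<-g.global // roll back the global slot
		return false
	}
}

func main() {
	g := newTransferGate(20, 2)
	fmt.Println(g.tryAcquire("clientA"), g.tryAcquire("clientA"), g.tryAcquire("clientA")) // true true false
}
```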
type IndexProviderConfig struct {
// Enable sets whether to enable indexing announcement to the network and expose endpoints that
// allow indexer nodes to process announcements. Enabled by default.
Enable bool

// EntriesCacheCapacity sets the maximum capacity to use for caching the indexing advertisement
// entries. Defaults to 1024 if not specified. The cache is evicted using LRU policy. The
// maximum storage used by the cache is a factor of EntriesCacheCapacity, EntriesChunkSize and
// the length of multihashes being advertised. For example, advertising 128-bit long multihashes
// with the default EntriesCacheCapacity, and EntriesChunkSize means the cache size can grow to
// 256MiB when full.
EntriesCacheCapacity int

// EntriesChunkSize sets the maximum number of multihashes to include in a single entries chunk.
// Defaults to 16384 if not specified. Note that chunks are chained together for indexing
// advertisements that include more multihashes than the configured EntriesChunkSize.
EntriesChunkSize int

// TopicName sets the topic name on which the changes to the advertised content are announced.
// If not explicitly specified, the topic name is automatically inferred from the network name
// in the following format: '/indexer/ingest/<network-name>'
// Defaults to empty, which implies the topic name is inferred from network name.
TopicName string

// PurgeCacheOnStart sets whether to clear any cached entries chunks when the provider engine
// starts. By default, the cache is rehydrated from previously cached entries stored in
// datastore if any is present.
PurgeCacheOnStart bool
}

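The TopicName defaulting rule described above is simple enough to state in code. A minimal sketch with a hypothetical helper name:

```go
package main

import "fmt"

// ingestTopic illustrates the documented TopicName default: when the
// configured value is empty, the topic is inferred from the network name
// as "/indexer/ingest/<network-name>".
func ingestTopic(configured, networkName string) string {
	if configured != "" {
		return configured
	}
	return "/indexer/ingest/" + networkName
}

func main() {
	fmt.Println(ingestTopic("", "mainnet")) // /indexer/ingest/mainnet
}
```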
type RetrievalPricing struct {
Strategy string // possible values: "default", "external"

Default *RetrievalPricingDefault
External *RetrievalPricingExternal
}

type RetrievalPricingExternal struct {
// Path of the external script that will be run to price a retrieval deal.
// This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "external".
Path string
}

type RetrievalPricingDefault struct {
// VerifiedDealsFreeTransfer configures zero fees for data transfer for a retrieval deal
// of a payloadCid that belongs to a verified storage deal.
// This parameter is ONLY applicable if the retrieval pricing policy strategy has been configured to "default".
// default value is true
VerifiedDealsFreeTransfer bool
}

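The comments above imply a two-way dispatch on Strategy. A hedged sketch of how that selection could look; the freeTransfer function and local struct copies are illustrative, not the removed markets implementation:

```go
package main

import (
	"errors"
	"fmt"
)

// Local copies of the config shapes above, so the sketch is self-contained.
type RetrievalPricing struct {
	Strategy string // "default" or "external"
	Default  *RetrievalPricingDefault
	External *RetrievalPricingExternal
}

type RetrievalPricingDefault struct{ VerifiedDealsFreeTransfer bool }
type RetrievalPricingExternal struct{ Path string }

// freeTransfer decides whether a retrieval's data transfer is free of charge.
func freeTransfer(p *RetrievalPricing, isVerifiedDeal bool) (bool, error) {
	switch p.Strategy {
	case "default":
		// zero-fee transfer only for payloads from verified storage deals
		return p.Default.VerifiedDealsFreeTransfer && isVerifiedDeal, nil
	case "external":
		// a real implementation would exec p.External.Path and parse its output
		return false, fmt.Errorf("external script %q not run in this sketch", p.External.Path)
	default:
		return false, errors.New("unknown pricing strategy")
	}
}

func main() {
	p := &RetrievalPricing{Strategy: "default", Default: &RetrievalPricingDefault{VerifiedDealsFreeTransfer: true}}
	fmt.Println(freeTransfer(p, true)) // true <nil>
}
```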
type ProvingConfig struct {
@ -544,6 +543,20 @@ type MinerFeeConfig struct {
MaximizeWindowPoStFeeCap bool
}

type CurioFees struct {
DefaultMaxFee types.FIL
MaxPreCommitGasFee types.FIL
MaxCommitGasFee types.FIL

// maxBatchFee = maxBase + maxPerSector * nSectors
MaxPreCommitBatchGasFee BatchFeeConfig
MaxCommitBatchGasFee BatchFeeConfig

MaxTerminateGasFee types.FIL
// WindowPoSt is a high-value operation, so the default fee should be high.
MaxWindowPoStGasFee types.FIL
MaxPublishDealsFee types.FIL
}
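The maxBatchFee formula in the CurioFees comment reads as a linear cap in the number of sectors. In this sketch the field names mirror lotus's BatchFeeConfig, but the plain attoFIL big.Int amounts and the method body are illustrative assumptions:

```go
package main

import (
	"fmt"
	"math/big"
)

// BatchFeeConfig implements the comment's formula:
// maxBatchFee = maxBase + maxPerSector * nSectors.
type BatchFeeConfig struct {
	Base      *big.Int // attoFIL cap independent of batch size
	PerSector *big.Int // attoFIL cap added per batched sector
}

func (b BatchFeeConfig) FeeForSectors(nSectors int) *big.Int {
	fee := new(big.Int).Mul(b.PerSector, big.NewInt(int64(nSectors)))
	return fee.Add(fee, b.Base)
}

func main() {
	cfg := BatchFeeConfig{Base: big.NewInt(0), PerSector: big.NewInt(2_000_000_000)}
	fmt.Println(cfg.FeeForSectors(32)) // 64000000000 attoFIL for a 32-sector batch
}
```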
type MinerAddressConfig struct {
// Addresses to send PreCommit messages from
PreCommitControl []string
@ -562,6 +575,135 @@ type MinerAddressConfig struct {
DisableWorkerFallback bool
}

type CurioAddresses struct {
// Addresses to send PreCommit messages from
PreCommitControl []string
// Addresses to send Commit messages from
CommitControl []string
TerminateControl []string

// DisableOwnerFallback disables usage of the owner address for messages
// sent automatically
DisableOwnerFallback bool
// DisableWorkerFallback disables usage of the worker address for messages
// sent automatically, if control addresses are configured.
// A control address that doesn't have enough funds will still be chosen
// over the worker address if this flag is set.
DisableWorkerFallback bool

// MinerAddresses are the addresses of the miner actors to use for sending messages
MinerAddresses []string
}

type CurioProvingConfig struct {
// Maximum number of sector checks to run in parallel. (0 = unlimited)
//
// WARNING: Setting this value too high may make the node crash by running out of stack
// WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due
// to late submission.
//
// After changing this option, confirm that the new value works in your setup by invoking
// 'lotus-miner proving compute window-post 0'
ParallelCheckLimit int

// Maximum amount of time a proving pre-check can take for a sector. If the check times out the sector will be skipped
//
// WARNING: Setting this value too low risks sectors being skipped even though they are accessible, just reading the
// test challenge took longer than this timeout
// WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are
// blocked (e.g. in case of disconnected NFS mount)
SingleCheckTimeout Duration

// Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
// the partition which didn't get checked on time will be skipped
//
// WARNING: Setting this value too low risks sectors being skipped even though they are accessible, just reading the
// test challenge took longer than this timeout
// WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are
// blocked or slow
PartitionCheckTimeout Duration

// Disable WindowPoSt provable sector readability checks.
//
// In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges
// from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as
// we're only interested in checking that sector data can be read.
//
// When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process
// can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by
// the builtin logic not skipping snark computation when some sectors need to be skipped.
//
// When using PoSt workers, this process is mostly redundant, with PoSt workers challenges will be read once, and
// if challenges for some sectors aren't readable, those sectors will just get skipped.
//
// Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter
// time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should
// be negligible.
//
// NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers.
//
// NOTE: Even when this option is enabled, recovering sectors will be checked before recovery declaration message is
// sent to the chain
//
// After changing this option, confirm that the new value works in your setup by invoking
// 'lotus-miner proving compute window-post 0'
DisableWDPoStPreChecks bool

// Maximum number of partitions to prove in a single SubmitWindowPoSt message. 0 = network limit (3 in nv21)
//
// A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors.
//
// Note that setting this value lower may result in less efficient gas use - more messages will be sent,
// to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
//
// Setting this value above the network limit has no effect
MaxPartitionsPerPoStMessage int

// Maximum number of partitions to declare in a single DeclareFaultsRecovered message. 0 = no limit.

// In some cases when submitting DeclareFaultsRecovered messages,
// there may be too many recoveries to fit in a BlockGasLimit.
// In those cases it may be necessary to set this value to something low (eg 1);
// Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed,
// resulting in more total gas use (but each message will have lower gas limit)
MaxPartitionsPerRecoveryMessage int

// Enable single partition per PoSt Message for partitions containing recovery sectors
//
// In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be
// too high to fit in the block gas limit. In those cases, it becomes useful to only house the single partition
// with recovering sectors in the post message
//
// Note that setting this value lower may result in less efficient gas use - more messages will be sent,
// to prove each deadline, resulting in more total gas use (but each message will have lower gas limit)
SingleRecoveringPartitionPerPostMessage bool
}

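The effect of MaxPartitionsPerPoStMessage and MaxPartitionsPerRecoveryMessage is essentially batching a partition list into per-message groups. A hedged sketch of that grouping (an illustration, not Curio's actual batching code):

```go
package main

import "fmt"

// chunkPartitions splits partitions into per-message groups of at most max,
// with max <= 0 meaning "no local limit" (everything in one message, subject
// to the network limit, which this sketch does not model).
func chunkPartitions(partitions []uint64, max int) [][]uint64 {
	if max <= 0 || max >= len(partitions) {
		return [][]uint64{partitions}
	}
	var batches [][]uint64
	for len(partitions) > max {
		batches = append(batches, partitions[:max])
		partitions = partitions[max:]
	}
	return append(batches, partitions)
}

func main() {
	// With max=1, each DeclareFaultsRecovered message carries one partition:
	// more messages, but each fits comfortably under the block gas limit.
	fmt.Println(chunkPartitions([]uint64{0, 1, 2}, 1)) // [[0] [1] [2]]
}
```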
type CurioIngestConfig struct {
	// Maximum number of sectors that can be queued waiting for SDR to start processing.
	// 0 = unlimited
	// Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
	// The SDR queue includes deals which are in the process of entering the sealing pipeline - the size of this queue
	// will also impact the maximum number of ParkPiece tasks which can run concurrently.
	//
	// The SDR queue is the first queue in the sealing pipeline, meaning that it should be used as the primary backpressure mechanism.
	MaxQueueSDR int

	// Maximum number of sectors that can be queued waiting for SDRTrees to start processing.
	// 0 = unlimited
	// Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
	// In the case of the trees tasks it is possible that this queue grows above this limit; the backpressure is only
	// applied to sectors entering the pipeline.
	MaxQueueTrees int

	// Maximum number of sectors that can be queued waiting for PoRep to start processing.
	// 0 = unlimited
	// Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem.
	// Like with the trees tasks, it is possible that this queue grows above this limit; the backpressure is only
	// applied to sectors entering the pipeline.
	MaxQueuePoRep int
}

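The three queue limits are simple admission gates applied at different pipeline stages, with MaxQueueSDR as the primary one. A minimal sketch of the gating rule - the function and error names here are illustrative, not Curio's actual API:

package main

import "errors"

// errBackpressure is a hypothetical sentinel used only for illustration.
var errBackpressure = errors.New("sealing pipeline backlogged; deferring deal ingestion")

// admitToSDRQueue applies the rule described above: a limit of 0 means
// unlimited; otherwise new sectors are refused once the SDR queue is full.
// Because SDR is the first queue in the pipeline, refusing here is what
// actually slows the intake of deal data from markets.
func admitToSDRQueue(queued, maxQueueSDR int) error {
	if maxQueueSDR > 0 && queued >= maxQueueSDR {
		return errBackpressure
	}
	return nil
}
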
// API contains configs for API endpoint
type API struct {
	// Binding address for the Lotus API
@ -675,20 +817,6 @@ type Splitstore struct {
}

// Full Node
type Client struct {
	// The maximum number of simultaneous data transfers between the client
	// and storage providers for storage deals
	SimultaneousTransfersForStorage uint64
	// The maximum number of simultaneous data transfers between the client
	// and storage providers for retrieval deals
	SimultaneousTransfersForRetrieval uint64

	// Require that retrievals perform no on-chain operations. Paid retrievals
	// without existing payment channels with available funds will fail instead
	// of automatically performing on-chain operations.
	OffChainRetrieval bool
}

type Wallet struct {
	RemoteBackend string
	EnableLedger  bool
@ -819,3 +947,18 @@ type FaultReporterConfig struct {
	// rewards. This address should have adequate funds to cover gas fees.
	ConsensusFaultReporterAddress string
}

type CurioAlerting struct {
	// PagerDutyEventURL is the URL for the PagerDuty.com Events API v2. Events sent to this URL are ultimately
	// routed to a PagerDuty.com service and processed.
	// The default is sufficient for integration with the stock commercial PagerDuty.com service.
	PagerDutyEventURL string

	// PageDutyIntegrationKey is the integration key for a PagerDuty.com service. You can find this unique service
	// identifier on the integration page for the service.
	PageDutyIntegrationKey string

	// MinimumWalletBalance is the minimum balance for all active wallets. If the balance is below this value, an
	// alert will be triggered for the wallet.
	MinimumWalletBalance types.FIL
}

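For reference, a hedged sketch of filling in this alerting config; the PagerDuty endpoint shown is an illustrative value (the stock Events API v2 URL), and the integration key is a placeholder you would copy from the service's integration page:

// types.MustParseFIL parses a human-readable FIL amount; it lives in
// lotus's chain/types package.
alerting := config.CurioAlerting{
	PagerDutyEventURL:      "https://events.pagerduty.com/v2/enqueue", // illustrative default-style endpoint
	PageDutyIntegrationKey: "<your-integration-key>",                  // placeholder
	MinimumWalletBalance:   types.MustParseFIL("5 FIL"),               // alert when a wallet drops below 5 FIL
}
_ = alerting
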
@ -3,34 +3,18 @@ package impl
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"os"
	"sort"
	"strconv"
	"time"

	"github.com/google/uuid"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-graphsync"
	gsimpl "github.com/ipfs/go-graphsync/impl"
	"github.com/ipfs/go-graphsync/peerstate"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	"go.uber.org/fx"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/dagstore"
	"github.com/filecoin-project/dagstore/shard"
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-bitfield"
	datatransfer "github.com/filecoin-project/go-data-transfer/v2"
	gst "github.com/filecoin-project/go-data-transfer/v2/transport/graphsync"
	"github.com/filecoin-project/go-fil-markets/piecestore"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-fil-markets/storagemarket"
	filmktsstore "github.com/filecoin-project/go-fil-markets/stores"
	"github.com/filecoin-project/go-jsonrpc/auth"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
@ -46,8 +30,6 @@ import (
	"github.com/filecoin-project/lotus/chain/gen"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
	mktsdagstore "github.com/filecoin-project/lotus/markets/dagstore"
	"github.com/filecoin-project/lotus/markets/storageadapter"
	"github.com/filecoin-project/lotus/miner"
	"github.com/filecoin-project/lotus/node/modules"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
@ -76,18 +58,7 @@ type StorageMinerAPI struct {
	RemoteStore *paths.Remote

	// Markets
	PieceStore dtypes.ProviderPieceStore `optional:"true"`
	StorageProvider storagemarket.StorageProvider `optional:"true"`
	RetrievalProvider retrievalmarket.RetrievalProvider `optional:"true"`
	SectorAccessor retrievalmarket.SectorAccessor `optional:"true"`
	DataTransfer dtypes.ProviderDataTransfer `optional:"true"`
	StagingGraphsync dtypes.StagingGraphsync `optional:"true"`
	Transport dtypes.ProviderTransport `optional:"true"`
	DealPublisher *storageadapter.DealPublisher `optional:"true"`
	SectorBlocks *sectorblocks.SectorBlocks `optional:"true"`
	Host host.Host `optional:"true"`
	DAGStore *dagstore.DAGStore `optional:"true"`
	DAGStoreWrapper *mktsdagstore.Wrapper `optional:"true"`
	SectorBlocks *sectorblocks.SectorBlocks `optional:"true"`

	// Miner / storage
	Miner *sealing.Sealing `optional:"true"`
@ -106,24 +77,10 @@ type StorageMinerAPI struct {
	// StorageService is populated when we're not the main storage node (e.g. we're a markets node)
	StorageService modules.MinerStorageService `optional:"true"`

	ConsiderOnlineStorageDealsConfigFunc dtypes.ConsiderOnlineStorageDealsConfigFunc `optional:"true"`
	SetConsiderOnlineStorageDealsConfigFunc dtypes.SetConsiderOnlineStorageDealsConfigFunc `optional:"true"`
	ConsiderOnlineRetrievalDealsConfigFunc dtypes.ConsiderOnlineRetrievalDealsConfigFunc `optional:"true"`
	SetConsiderOnlineRetrievalDealsConfigFunc dtypes.SetConsiderOnlineRetrievalDealsConfigFunc `optional:"true"`
	StorageDealPieceCidBlocklistConfigFunc dtypes.StorageDealPieceCidBlocklistConfigFunc `optional:"true"`
	SetStorageDealPieceCidBlocklistConfigFunc dtypes.SetStorageDealPieceCidBlocklistConfigFunc `optional:"true"`
	ConsiderOfflineStorageDealsConfigFunc dtypes.ConsiderOfflineStorageDealsConfigFunc `optional:"true"`
	SetConsiderOfflineStorageDealsConfigFunc dtypes.SetConsiderOfflineStorageDealsConfigFunc `optional:"true"`
	ConsiderOfflineRetrievalDealsConfigFunc dtypes.ConsiderOfflineRetrievalDealsConfigFunc `optional:"true"`
	SetConsiderOfflineRetrievalDealsConfigFunc dtypes.SetConsiderOfflineRetrievalDealsConfigFunc `optional:"true"`
	ConsiderVerifiedStorageDealsConfigFunc dtypes.ConsiderVerifiedStorageDealsConfigFunc `optional:"true"`
	SetConsiderVerifiedStorageDealsConfigFunc dtypes.SetConsiderVerifiedStorageDealsConfigFunc `optional:"true"`
	ConsiderUnverifiedStorageDealsConfigFunc dtypes.ConsiderUnverifiedStorageDealsConfigFunc `optional:"true"`
	SetConsiderUnverifiedStorageDealsConfigFunc dtypes.SetConsiderUnverifiedStorageDealsConfigFunc `optional:"true"`
	SetSealingConfigFunc dtypes.SetSealingConfigFunc `optional:"true"`
	GetSealingConfigFunc dtypes.GetSealingConfigFunc `optional:"true"`
	GetExpectedSealDurationFunc dtypes.GetExpectedSealDurationFunc `optional:"true"`
	SetExpectedSealDurationFunc dtypes.SetExpectedSealDurationFunc `optional:"true"`
	SetSealingConfigFunc dtypes.SetSealingConfigFunc `optional:"true"`
	GetSealingConfigFunc dtypes.GetSealingConfigFunc `optional:"true"`
	GetExpectedSealDurationFunc dtypes.GetExpectedSealDurationFunc `optional:"true"`
	SetExpectedSealDurationFunc dtypes.SetExpectedSealDurationFunc `optional:"true"`

	HarmonyDB *harmonydb.DB `optional:"true"`
}
@ -533,16 +490,6 @@ func (sm *StorageMinerAPI) SealingRemoveRequest(ctx context.Context, schedId uui
	return sm.StorageMgr.RemoveSchedRequest(ctx, schedId)
}

func (sm *StorageMinerAPI) MarketImportDealData(ctx context.Context, propCid cid.Cid, path string) error {
	fi, err := os.Open(path)
	if err != nil {
		return xerrors.Errorf("failed to open file: %w", err)
	}
	defer fi.Close() //nolint:errcheck

	return sm.StorageProvider.ImportDataForDeal(ctx, propCid, fi)
}

func (sm *StorageMinerAPI) listDeals(ctx context.Context) ([]*api.MarketDeal, error) {
	ts, err := sm.Full.ChainHead(ctx)
	if err != nil {
@ -569,671 +516,6 @@ func (sm *StorageMinerAPI) MarketListDeals(ctx context.Context) ([]*api.MarketDe
	return sm.listDeals(ctx)
}

func (sm *StorageMinerAPI) MarketListRetrievalDeals(ctx context.Context) ([]struct{}, error) {
	return []struct{}{}, nil
}

func (sm *StorageMinerAPI) MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) {
	results := make(chan storagemarket.MinerDeal)
	unsub := sm.StorageProvider.SubscribeToEvents(func(evt storagemarket.ProviderEvent, deal storagemarket.MinerDeal) {
		select {
		case results <- deal:
		case <-ctx.Done():
		}
	})
	go func() {
		<-ctx.Done()
		unsub()
		close(results)
	}()
	return results, nil
}

func (sm *StorageMinerAPI) MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) {
	return sm.StorageProvider.ListLocalDeals()
}

func (sm *StorageMinerAPI) MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error {
	options := []storagemarket.StorageAskOption{
		storagemarket.MinPieceSize(minPieceSize),
		storagemarket.MaxPieceSize(maxPieceSize),
	}

	return sm.StorageProvider.SetAsk(price, verifiedPrice, duration, options...)
}

func (sm *StorageMinerAPI) MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) {
	return sm.StorageProvider.GetAsk(), nil
}

func (sm *StorageMinerAPI) MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error {
	sm.RetrievalProvider.SetAsk(rask)
	return nil
}

func (sm *StorageMinerAPI) MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) {
	return sm.RetrievalProvider.GetAsk(), nil
}

func (sm *StorageMinerAPI) MarketListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) {
	inProgressChannels, err := sm.DataTransfer.InProgressChannels(ctx)
	if err != nil {
		return nil, err
	}

	apiChannels := make([]api.DataTransferChannel, 0, len(inProgressChannels))
	for _, channelState := range inProgressChannels {
		apiChannels = append(apiChannels, api.NewDataTransferChannel(sm.Host.ID(), channelState))
	}

	return apiChannels, nil
}

func (sm *StorageMinerAPI) MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error {
	selfPeer := sm.Host.ID()
	if isInitiator {
		return sm.DataTransfer.RestartDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: selfPeer, Responder: otherPeer, ID: transferID})
	}
	return sm.DataTransfer.RestartDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID})
}

func (sm *StorageMinerAPI) MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error {
	selfPeer := sm.Host.ID()
	if isInitiator {
		return sm.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: selfPeer, Responder: otherPeer, ID: transferID})
	}
	return sm.DataTransfer.CloseDataTransferChannel(ctx, datatransfer.ChannelID{Initiator: otherPeer, Responder: selfPeer, ID: transferID})
}

func (sm *StorageMinerAPI) MarketDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) {
	channels := make(chan api.DataTransferChannel)

	unsub := sm.DataTransfer.SubscribeToEvents(func(evt datatransfer.Event, channelState datatransfer.ChannelState) {
		channel := api.NewDataTransferChannel(sm.Host.ID(), channelState)
		select {
		case <-ctx.Done():
		case channels <- channel:
		}
	})

	go func() {
		defer unsub()
		<-ctx.Done()
	}()

	return channels, nil
}

func (sm *StorageMinerAPI) MarketDataTransferDiagnostics(ctx context.Context, mpid peer.ID) (*api.TransferDiagnostics, error) {
	gsTransport, ok := sm.Transport.(*gst.Transport)
	if !ok {
		return nil, errors.New("api only works for graphsync as transport")
	}
	graphsyncConcrete, ok := sm.StagingGraphsync.(*gsimpl.GraphSync)
	if !ok {
		return nil, errors.New("api only works for non-mock graphsync implementation")
	}

	inProgressChannels, err := sm.DataTransfer.InProgressChannels(ctx)
	if err != nil {
		return nil, err
	}

	allReceivingChannels := make(map[datatransfer.ChannelID]datatransfer.ChannelState)
	allSendingChannels := make(map[datatransfer.ChannelID]datatransfer.ChannelState)
	for channelID, channel := range inProgressChannels {
		if channel.OtherPeer() != mpid {
			continue
		}
		if channel.Status() == datatransfer.Completed {
			continue
		}
		if channel.Status() == datatransfer.Failed || channel.Status() == datatransfer.Cancelled {
			continue
		}
		if channel.SelfPeer() == channel.Sender() {
			allSendingChannels[channelID] = channel
		} else {
			allReceivingChannels[channelID] = channel
		}
	}

	// gather information about active transport channels
	transportChannels := gsTransport.ChannelsForPeer(mpid)
	// gather information about graphsync state for peer
	gsPeerState := graphsyncConcrete.PeerState(mpid)

	sendingTransfers := sm.generateTransfers(ctx, transportChannels.SendingChannels, gsPeerState.IncomingState, allSendingChannels)
	receivingTransfers := sm.generateTransfers(ctx, transportChannels.ReceivingChannels, gsPeerState.OutgoingState, allReceivingChannels)

	return &api.TransferDiagnostics{
		SendingTransfers: sendingTransfers,
		ReceivingTransfers: receivingTransfers,
	}, nil
}

// generateTransfers matches graphsync state and data-transfer state for a given peer
// to produce detailed output on what's happening with a transfer
func (sm *StorageMinerAPI) generateTransfers(ctx context.Context,
	transportChannels map[datatransfer.ChannelID]gst.ChannelGraphsyncRequests,
	gsPeerState peerstate.PeerState,
	allChannels map[datatransfer.ChannelID]datatransfer.ChannelState) []*api.GraphSyncDataTransfer {
	tc := &transferConverter{
		matchedChannelIds: make(map[datatransfer.ChannelID]struct{}),
		matchedRequests: make(map[graphsync.RequestID]*api.GraphSyncDataTransfer),
		gsDiagnostics: gsPeerState.Diagnostics(),
		requestStates: gsPeerState.RequestStates,
		allChannels: allChannels,
	}

	// iterate through all operating data transfer transport channels
	for channelID, channelRequests := range transportChannels {
		originalState, err := sm.DataTransfer.ChannelState(ctx, channelID)
		var baseDiagnostics []string
		var channelState *api.DataTransferChannel
		if err != nil {
			baseDiagnostics = append(baseDiagnostics, fmt.Sprintf("Unable to lookup channel state: %s", err))
		} else {
			cs := api.NewDataTransferChannel(sm.Host.ID(), originalState)
			channelState = &cs
		}
		// add the current request for this channel
		tc.convertTransfer(channelID, true, channelState, baseDiagnostics, channelRequests.Current, true)
		for _, requestID := range channelRequests.Previous {
			// add any previous requests that were cancelled for a restart
			tc.convertTransfer(channelID, true, channelState, baseDiagnostics, requestID, false)
		}
	}

	// collect any graphsync data for channels we don't have any data transfer data for
	tc.collectRemainingTransfers()

	return tc.transfers
}

type transferConverter struct {
	matchedChannelIds map[datatransfer.ChannelID]struct{}
	matchedRequests map[graphsync.RequestID]*api.GraphSyncDataTransfer
	transfers []*api.GraphSyncDataTransfer
	gsDiagnostics map[graphsync.RequestID][]string
	requestStates graphsync.RequestStates
	allChannels map[datatransfer.ChannelID]datatransfer.ChannelState
}

// convertTransfer assembles transfer and diagnostic data for a given graphsync/data-transfer request
func (tc *transferConverter) convertTransfer(channelID datatransfer.ChannelID, hasChannelID bool, channelState *api.DataTransferChannel, baseDiagnostics []string,
	requestID graphsync.RequestID, isCurrentChannelRequest bool) {
	diagnostics := baseDiagnostics
	state, hasState := tc.requestStates[requestID]
	stateString := state.String()
	if !hasState {
		stateString = "no graphsync state found"
	}
	var channelIDPtr *datatransfer.ChannelID
	if !hasChannelID {
		diagnostics = append(diagnostics, fmt.Sprintf("No data transfer channel id for GraphSync request ID %s", requestID))
	} else {
		channelIDPtr = &channelID
		if isCurrentChannelRequest && !hasState {
			diagnostics = append(diagnostics, fmt.Sprintf("No current request state for data transfer channel id %s", channelID))
		} else if !isCurrentChannelRequest && hasState {
			diagnostics = append(diagnostics, fmt.Sprintf("Graphsync request %s is a previous request on data transfer channel id %s that was restarted, but it is still running", requestID, channelID))
		}
	}
	diagnostics = append(diagnostics, tc.gsDiagnostics[requestID]...)
	transfer := &api.GraphSyncDataTransfer{
		RequestID: &requestID,
		RequestState: stateString,
		IsCurrentChannelRequest: isCurrentChannelRequest,
		ChannelID: channelIDPtr,
		ChannelState: channelState,
		Diagnostics: diagnostics,
	}
	tc.transfers = append(tc.transfers, transfer)
	tc.matchedRequests[requestID] = transfer
	if hasChannelID {
		tc.matchedChannelIds[channelID] = struct{}{}
	}
}

func (tc *transferConverter) collectRemainingTransfers() {
	for requestID := range tc.requestStates {
		if _, ok := tc.matchedRequests[requestID]; !ok {
			tc.convertTransfer(datatransfer.ChannelID{}, false, nil, nil, requestID, false)
		}
	}
	for requestID := range tc.gsDiagnostics {
		if _, ok := tc.matchedRequests[requestID]; !ok {
			tc.convertTransfer(datatransfer.ChannelID{}, false, nil, nil, requestID, false)
		}
	}
	for channelID, channelState := range tc.allChannels {
		if _, ok := tc.matchedChannelIds[channelID]; !ok {
			channelID := channelID
			cs := api.NewDataTransferChannel(channelState.SelfPeer(), channelState)
			transfer := &api.GraphSyncDataTransfer{
				RequestID: nil,
				RequestState: "graphsync state unknown",
				IsCurrentChannelRequest: false,
				ChannelID: &channelID,
				ChannelState: &cs,
				Diagnostics: []string{"data transfer with no open transport channel, cannot determine linked graphsync request"},
			}
			tc.transfers = append(tc.transfers, transfer)
		}
	}
}

func (sm *StorageMinerAPI) MarketPendingDeals(ctx context.Context) (api.PendingDealInfo, error) {
	return sm.DealPublisher.PendingDeals(), nil
}

func (sm *StorageMinerAPI) MarketRetryPublishDeal(ctx context.Context, propcid cid.Cid) error {
	return sm.StorageProvider.RetryDealPublishing(propcid)
}

func (sm *StorageMinerAPI) MarketPublishPendingDeals(ctx context.Context) error {
	sm.DealPublisher.ForcePublishPendingDeals()
	return nil
}

func (sm *StorageMinerAPI) DagstoreListShards(ctx context.Context) ([]api.DagstoreShardInfo, error) {
	if sm.DAGStore == nil {
		return nil, fmt.Errorf("dagstore not available on this node")
	}

	info := sm.DAGStore.AllShardsInfo()
	ret := make([]api.DagstoreShardInfo, 0, len(info))
	for k, i := range info {
		ret = append(ret, api.DagstoreShardInfo{
			Key: k.String(),
			State: i.ShardState.String(),
			Error: func() string {
				if i.Error == nil {
					return ""
				}
				return i.Error.Error()
			}(),
		})
	}

	// order by key.
	sort.SliceStable(ret, func(i, j int) bool {
		return ret[i].Key < ret[j].Key
	})

	return ret, nil
}

func (sm *StorageMinerAPI) DagstoreRegisterShard(ctx context.Context, key string) error {
	if sm.DAGStore == nil {
		return fmt.Errorf("dagstore not available on this node")
	}

	// First check if the shard has already been registered
	k := shard.KeyFromString(key)
	_, err := sm.DAGStore.GetShardInfo(k)
	if err == nil {
		// Shard already registered, nothing further to do
		return nil
	}
	// If the shard is not registered we would expect ErrShardUnknown
	if !errors.Is(err, dagstore.ErrShardUnknown) {
		return fmt.Errorf("getting shard info from DAG store: %w", err)
	}

	pieceCid, err := cid.Parse(key)
	if err != nil {
		return fmt.Errorf("parsing shard key as piece cid: %w", err)
	}

	if err = filmktsstore.RegisterShardSync(ctx, sm.DAGStoreWrapper, pieceCid, "", true); err != nil {
		return fmt.Errorf("failed to register shard: %w", err)
	}

	return nil
}

func (sm *StorageMinerAPI) DagstoreInitializeShard(ctx context.Context, key string) error {
	if sm.DAGStore == nil {
		return fmt.Errorf("dagstore not available on this node")
	}

	k := shard.KeyFromString(key)

	info, err := sm.DAGStore.GetShardInfo(k)
	if err != nil {
		return fmt.Errorf("failed to get shard info: %w", err)
	}
	if st := info.ShardState; st != dagstore.ShardStateNew {
		return fmt.Errorf("cannot initialize shard; expected state ShardStateNew, was: %s", st.String())
	}

	ch := make(chan dagstore.ShardResult, 1)
	if err = sm.DAGStore.AcquireShard(ctx, k, ch, dagstore.AcquireOpts{}); err != nil {
		return fmt.Errorf("failed to acquire shard: %w", err)
	}

	var res dagstore.ShardResult
	select {
	case res = <-ch:
	case <-ctx.Done():
		return ctx.Err()
	}

	if err := res.Error; err != nil {
		return fmt.Errorf("failed to acquire shard: %w", err)
	}

	if res.Accessor != nil {
		err = res.Accessor.Close()
		if err != nil {
			log.Warnw("failed to close shard accessor; continuing", "shard_key", k, "error", err)
		}
	}

	return nil
}

func (sm *StorageMinerAPI) DagstoreInitializeAll(ctx context.Context, params api.DagstoreInitializeAllParams) (<-chan api.DagstoreInitializeAllEvent, error) {
	if sm.DAGStore == nil {
		return nil, fmt.Errorf("dagstore not available on this node")
	}

	if sm.SectorAccessor == nil {
		return nil, fmt.Errorf("sector accessor not available on this node")
	}

	// prepare the throttler tokens.
	var throttle chan struct{}
	if c := params.MaxConcurrency; c > 0 {
		throttle = make(chan struct{}, c)
		for i := 0; i < c; i++ {
			throttle <- struct{}{}
		}
	}

	// are we initializing only unsealed pieces?
	onlyUnsealed := !params.IncludeSealed

	info := sm.DAGStore.AllShardsInfo()
	var toInitialize []string
	for k, i := range info {
		if i.ShardState != dagstore.ShardStateNew {
			continue
		}

		// if we're initializing only unsealed pieces, check if there's an
		// unsealed deal for this piece available.
		if onlyUnsealed {
			pieceCid, err := cid.Decode(k.String())
			if err != nil {
				log.Warnw("DagstoreInitializeAll: failed to decode shard key as piece CID; skipping", "shard_key", k.String(), "error", err)
				continue
			}

			pi, err := sm.PieceStore.GetPieceInfo(pieceCid)
			if err != nil {
				log.Warnw("DagstoreInitializeAll: failed to get piece info; skipping", "piece_cid", pieceCid, "error", err)
				continue
			}

			var isUnsealed bool
			for _, d := range pi.Deals {
				isUnsealed, err = sm.SectorAccessor.IsUnsealed(ctx, d.SectorID, d.Offset.Unpadded(), d.Length.Unpadded())
				if err != nil {
					log.Warnw("DagstoreInitializeAll: failed to get unsealed status; skipping deal", "deal_id", d.DealID, "error", err)
					continue
				}
				if isUnsealed {
					break
				}
			}

			if !isUnsealed {
				log.Infow("DagstoreInitializeAll: skipping piece because it's sealed", "piece_cid", pieceCid, "error", err)
				continue
			}
		}

		// yes, we're initializing this shard.
		toInitialize = append(toInitialize, k.String())
	}

	total := len(toInitialize)
	if total == 0 {
		out := make(chan api.DagstoreInitializeAllEvent)
		close(out)
		return out, nil
	}

	// response channel must be closed when we're done, or the context is cancelled.
	// this buffering is necessary to prevent inflight children goroutines from
	// publishing to a closed channel (res) when the context is cancelled.
	out := make(chan api.DagstoreInitializeAllEvent, 32) // internal buffer.
	res := make(chan api.DagstoreInitializeAllEvent, 32) // returned to caller.

	// pump events back to caller.
	// two events per shard.
	go func() {
		defer close(res)

		for i := 0; i < total*2; i++ {
			select {
			case res <- <-out:
			case <-ctx.Done():
				return
			}
		}
	}()

	go func() {
		for i, k := range toInitialize {
			if throttle != nil {
				select {
				case <-throttle:
					// acquired a throttle token, proceed.
				case <-ctx.Done():
					return
				}
			}

			go func(k string, i int) {
				r := api.DagstoreInitializeAllEvent{
					Key: k,
					Event: "start",
					Total: total,
					Current: i + 1, // start with 1
				}
				select {
				case out <- r:
				case <-ctx.Done():
					return
				}

				err := sm.DagstoreInitializeShard(ctx, k)

				if throttle != nil {
					throttle <- struct{}{}
				}

				r.Event = "end"
				if err == nil {
					r.Success = true
				} else {
					r.Success = false
					r.Error = err.Error()
				}

				select {
				case out <- r:
				case <-ctx.Done():
				}
			}(k, i)
		}
	}()

	return res, nil
}

func (sm *StorageMinerAPI) DagstoreRecoverShard(ctx context.Context, key string) error {
	if sm.DAGStore == nil {
		return fmt.Errorf("dagstore not available on this node")
	}

	k := shard.KeyFromString(key)

	info, err := sm.DAGStore.GetShardInfo(k)
	if err != nil {
		return fmt.Errorf("failed to get shard info: %w", err)
	}
	if st := info.ShardState; st != dagstore.ShardStateErrored {
		return fmt.Errorf("cannot recover shard; expected state ShardStateErrored, was: %s", st.String())
	}

	ch := make(chan dagstore.ShardResult, 1)
	if err = sm.DAGStore.RecoverShard(ctx, k, ch, dagstore.RecoverOpts{}); err != nil {
		return fmt.Errorf("failed to recover shard: %w", err)
	}

	var res dagstore.ShardResult
	select {
	case res = <-ch:
	case <-ctx.Done():
		return ctx.Err()
	}

	return res.Error
}

func (sm *StorageMinerAPI) DagstoreGC(ctx context.Context) ([]api.DagstoreShardResult, error) {
	if sm.DAGStore == nil {
		return nil, fmt.Errorf("dagstore not available on this node")
	}

	res, err := sm.DAGStore.GC(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to gc: %w", err)
	}

	ret := make([]api.DagstoreShardResult, 0, len(res.Shards))
	for k, err := range res.Shards {
		r := api.DagstoreShardResult{Key: k.String()}
		if err == nil {
			r.Success = true
		} else {
			r.Success = false
			r.Error = err.Error()
		}
		ret = append(ret, r)
	}

	return ret, nil
}

func (sm *StorageMinerAPI) IndexerAnnounceDeal(ctx context.Context, proposalCid cid.Cid) error {
	return sm.StorageProvider.AnnounceDealToIndexer(ctx, proposalCid)
}

func (sm *StorageMinerAPI) IndexerAnnounceAllDeals(ctx context.Context) error {
	return sm.StorageProvider.AnnounceAllDealsToIndexer(ctx)
}

func (sm *StorageMinerAPI) DagstoreLookupPieces(ctx context.Context, cid cid.Cid) ([]api.DagstoreShardInfo, error) {
	if sm.DAGStore == nil {
		return nil, fmt.Errorf("dagstore not available on this node")
	}

	keys, err := sm.DAGStore.TopLevelIndex.GetShardsForMultihash(ctx, cid.Hash())
	if err != nil {
		return nil, err
	}

	var ret []api.DagstoreShardInfo

	for _, k := range keys {
		shard, err := sm.DAGStore.GetShardInfo(k)
		if err != nil {
			return nil, err
		}

		ret = append(ret, api.DagstoreShardInfo{
			Key: k.String(),
			State: shard.ShardState.String(),
			Error: func() string {
				if shard.Error == nil {
					return ""
				}
				return shard.Error.Error()
			}(),
		})
	}

	// order by key.
	sort.SliceStable(ret, func(i, j int) bool {
		return ret[i].Key < ret[j].Key
	})

	return ret, nil
}

func (sm *StorageMinerAPI) DealsList(ctx context.Context) ([]*api.MarketDeal, error) {
	return sm.listDeals(ctx)
}

func (sm *StorageMinerAPI) RetrievalDealsList(ctx context.Context) (map[retrievalmarket.ProviderDealIdentifier]retrievalmarket.ProviderDealState, error) {
	return sm.RetrievalProvider.ListDeals(), nil
}

func (sm *StorageMinerAPI) DealsConsiderOnlineStorageDeals(ctx context.Context) (bool, error) {
	return sm.ConsiderOnlineStorageDealsConfigFunc()
}

func (sm *StorageMinerAPI) DealsSetConsiderOnlineStorageDeals(ctx context.Context, b bool) error {
	return sm.SetConsiderOnlineStorageDealsConfigFunc(b)
}

func (sm *StorageMinerAPI) DealsConsiderOnlineRetrievalDeals(ctx context.Context) (bool, error) {
	return sm.ConsiderOnlineRetrievalDealsConfigFunc()
}

func (sm *StorageMinerAPI) DealsSetConsiderOnlineRetrievalDeals(ctx context.Context, b bool) error {
	return sm.SetConsiderOnlineRetrievalDealsConfigFunc(b)
}

func (sm *StorageMinerAPI) DealsConsiderOfflineStorageDeals(ctx context.Context) (bool, error) {
	return sm.ConsiderOfflineStorageDealsConfigFunc()
}

func (sm *StorageMinerAPI) DealsSetConsiderOfflineStorageDeals(ctx context.Context, b bool) error {
	return sm.SetConsiderOfflineStorageDealsConfigFunc(b)
}

func (sm *StorageMinerAPI) DealsConsiderOfflineRetrievalDeals(ctx context.Context) (bool, error) {
	return sm.ConsiderOfflineRetrievalDealsConfigFunc()
}

func (sm *StorageMinerAPI) DealsSetConsiderOfflineRetrievalDeals(ctx context.Context, b bool) error {
	return sm.SetConsiderOfflineRetrievalDealsConfigFunc(b)
}

func (sm *StorageMinerAPI) DealsConsiderVerifiedStorageDeals(ctx context.Context) (bool, error) {
	return sm.ConsiderVerifiedStorageDealsConfigFunc()
}

func (sm *StorageMinerAPI) DealsSetConsiderVerifiedStorageDeals(ctx context.Context, b bool) error {
	return sm.SetConsiderVerifiedStorageDealsConfigFunc(b)
}

func (sm *StorageMinerAPI) DealsConsiderUnverifiedStorageDeals(ctx context.Context) (bool, error) {
	return sm.ConsiderUnverifiedStorageDealsConfigFunc()
}

func (sm *StorageMinerAPI) DealsSetConsiderUnverifiedStorageDeals(ctx context.Context, b bool) error {
	return sm.SetConsiderUnverifiedStorageDealsConfigFunc(b)
}

func (sm *StorageMinerAPI) DealsGetExpectedSealDurationFunc(ctx context.Context) (time.Duration, error) {
	return sm.GetExpectedSealDurationFunc()
}
@ -1242,24 +524,6 @@ func (sm *StorageMinerAPI) DealsSetExpectedSealDurationFunc(ctx context.Context,
	return sm.SetExpectedSealDurationFunc(d)
}

func (sm *StorageMinerAPI) DealsImportData(ctx context.Context, deal cid.Cid, fname string) error {
	fi, err := os.Open(fname)
	if err != nil {
		return xerrors.Errorf("failed to open given file: %w", err)
	}
	defer fi.Close() //nolint:errcheck

	return sm.StorageProvider.ImportDataForDeal(ctx, deal, fi)
}

func (sm *StorageMinerAPI) DealsPieceCidBlocklist(ctx context.Context) ([]cid.Cid, error) {
	return sm.StorageDealPieceCidBlocklistConfigFunc()
}

func (sm *StorageMinerAPI) DealsSetPieceCidBlocklist(ctx context.Context, cids []cid.Cid) error {
	return sm.SetStorageDealPieceCidBlocklistConfigFunc(cids)
}

func (sm *StorageMinerAPI) StorageAddLocal(ctx context.Context, path string) error {
	if sm.StorageMgr == nil {
		return xerrors.Errorf("no storage manager")
@ -1283,32 +547,6 @@ func (sm *StorageMinerAPI) StorageRedeclareLocal(ctx context.Context, id *storif

	return sm.StorageMgr.RedeclareLocalStorage(ctx, id, dropMissing)
}

func (sm *StorageMinerAPI) PiecesListPieces(ctx context.Context) ([]cid.Cid, error) {
	return sm.PieceStore.ListPieceInfoKeys()
}

func (sm *StorageMinerAPI) PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) {
	return sm.PieceStore.ListCidInfoKeys()
}

func (sm *StorageMinerAPI) PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) {
	pi, err := sm.PieceStore.GetPieceInfo(pieceCid)
	if err != nil {
		return nil, err
	}
	return &pi, nil
}

func (sm *StorageMinerAPI) PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) {
	ci, err := sm.PieceStore.GetCIDInfo(payloadCid)
	if err != nil {
		return nil, err
	}

	return &ci, nil
}

func (sm *StorageMinerAPI) CreateBackup(ctx context.Context, fpath string) error {
	return backup(ctx, sm.DS, fpath)
}

@ -100,16 +100,6 @@ func CheckUDPBufferSize(wanted int) func(al *alerting.Alerting) {
	}
}

func LegacyMarketsEOL(al *alerting.Alerting) {
	// Add alert if lotus-miner legacy markets subsystem is still in use
	alert := al.AddAlertType("system", "EOL")

	// Alert with a message to migrate to Boost or similar markets subsystems
	al.Raise(alert, map[string]string{
		"message": "The lotus-miner legacy markets subsystem is deprecated and will be removed in a future release. Please migrate to [Boost](https://boost.filecoin.io) or similar markets subsystems.",
	})
}

func CheckFvmConcurrency() func(al *alerting.Alerting) {
	return func(al *alerting.Alerting) {
		fvmConcurrency, ok := os.LookupEnv("LOTUS_FVM_CONCURRENCY")

@ -1,218 +0,0 @@
package modules

import (
	"bytes"
	"context"
	"os"
	"path/filepath"
	"time"

	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	"github.com/libp2p/go-libp2p/core/host"
	"go.uber.org/fx"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-data-transfer/v2/channelmonitor"
	dtimpl "github.com/filecoin-project/go-data-transfer/v2/impl"
	dtnet "github.com/filecoin-project/go-data-transfer/v2/network"
	dtgstransport "github.com/filecoin-project/go-data-transfer/v2/transport/graphsync"
	"github.com/filecoin-project/go-fil-markets/discovery"
	discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl"
	rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network"
	"github.com/filecoin-project/go-fil-markets/storagemarket"
	storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl"
	smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/market"
	"github.com/filecoin-project/lotus/journal"
	"github.com/filecoin-project/lotus/markets"
	marketevents "github.com/filecoin-project/lotus/markets/loggers"
	"github.com/filecoin-project/lotus/markets/retrievaladapter"
	"github.com/filecoin-project/lotus/markets/storageadapter"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/node/impl/full"
	payapi "github.com/filecoin-project/lotus/node/impl/paych"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	"github.com/filecoin-project/lotus/node/modules/helpers"
	"github.com/filecoin-project/lotus/node/repo"
	"github.com/filecoin-project/lotus/node/repo/imports"
)

func HandleMigrateClientFunds(lc fx.Lifecycle, mctx helpers.MetricsCtx, ds dtypes.MetadataDS, wallet full.WalletAPI, fundMgr *market.FundManager) {
	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			addr, err := wallet.WalletDefaultAddress(ctx)
			// nothing to be done if there is no default address
			if err != nil {
				return nil
			}
			b, err := ds.Get(helpers.LifecycleCtx(mctx, lc), datastore.NewKey("/marketfunds/client"))
			if err != nil {
				if xerrors.Is(err, datastore.ErrNotFound) {
					return nil
				}
				log.Errorf("client funds migration - getting datastore value: %v", err)
				return nil
			}

			var value abi.TokenAmount
			if err = value.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
				log.Errorf("client funds migration - unmarshalling datastore value: %v", err)
				return nil
			}
			_, err = fundMgr.Reserve(ctx, addr, addr, value)
			if err != nil {
				log.Errorf("client funds migration - reserving funds (wallet %s, addr %s, funds %d): %v",
					addr, addr, value, err)
				return nil
			}

			return ds.Delete(helpers.LifecycleCtx(mctx, lc), datastore.NewKey("/marketfunds/client"))
		},
	})
}

func ClientImportMgr(ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ClientImportMgr, error) {
	// store the imports under the repo's `imports` subdirectory.
	dir := filepath.Join(r.Path(), "imports")
	if err := os.MkdirAll(dir, 0755); err != nil {
		return nil, xerrors.Errorf("failed to create directory %s: %w", dir, err)
	}

	ns := namespace.Wrap(ds, datastore.NewKey("/client"))
	return imports.NewManager(ns, dir), nil
}

// TODO this should be removed.
func ClientBlockstore() dtypes.ClientBlockstore {
	// in most cases this is now unused in normal operations -- however, it's important to preserve for the IPFS use case
	return blockstore.WrapIDStore(blockstore.FromDatastore(datastore.NewMapDatastore()))
}

// NewClientGraphsyncDataTransfer returns a data transfer manager that just
// uses the client's DAG service for transfers
func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Graphsync, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ClientDataTransfer, error) {
	// go-data-transfer protocol retries:
	// 1s, 5s, 25s, 2m5s, 5m x 11 ~= 1 hour
	dtRetryParams := dtnet.RetryParameters(time.Second, 5*time.Minute, 15, 5)
	net := dtnet.NewFromLibp2pHost(h, dtRetryParams)

	dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/client/transfers"))
	transport := dtgstransport.NewTransport(h.ID(), gs)

	// data-transfer push / pull channel restart configuration:
	dtRestartConfig := dtimpl.ChannelRestartConfig(channelmonitor.Config{
		// Disable Accept and Complete timeouts until this issue is resolved:
		// https://github.com/filecoin-project/lotus/issues/6343#
		// Wait for the other side to respond to an Open channel message
		AcceptTimeout: 0,
		// Wait for the other side to send a Complete message once all
		// data has been sent / received
		CompleteTimeout: 0,

		// When an error occurs, wait a little while until all related errors
		// have fired before sending a restart message
		RestartDebounce: 10 * time.Second,
		// After sending a restart, wait for at least 1 minute before sending another
		RestartBackoff: time.Minute,
		// After trying to restart 3 times, give up and fail the transfer
		MaxConsecutiveRestarts: 3,
	})
	dt, err := dtimpl.NewDataTransfer(dtDs, net, transport, dtRestartConfig)
	if err != nil {
		return nil, err
	}

	dt.OnReady(marketevents.ReadyLogger("client data transfer"))
	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			dt.SubscribeToEvents(marketevents.DataTransferLogger)
			return dt.Start(ctx)
		},
		OnStop: func(ctx context.Context) error {
			return dt.Stop(ctx)
		},
	})
	return dt, nil
}

// NewClientDatastore creates a datastore for the client to store its deals
func NewClientDatastore(ds dtypes.MetadataDS) dtypes.ClientDatastore {
	return namespace.Wrap(ds, datastore.NewKey("/deals/client"))
}

// StorageBlockstoreAccessor returns the default storage blockstore accessor
// from the import manager.
func StorageBlockstoreAccessor(importmgr dtypes.ClientImportMgr) storagemarket.BlockstoreAccessor {
	return storageadapter.NewImportsBlockstoreAccessor(importmgr)
}

// RetrievalBlockstoreAccessor returns the default retrieval blockstore accessor
// using the subdirectory `retrievals` under the repo.
func RetrievalBlockstoreAccessor(r repo.LockedRepo) (retrievalmarket.BlockstoreAccessor, error) {
	dir := filepath.Join(r.Path(), "retrievals")
	if err := os.MkdirAll(dir, 0755); err != nil {
		return nil, xerrors.Errorf("failed to create directory %s: %w", dir, err)
	}
	return retrievaladapter.NewCARBlockstoreAccessor(dir), nil
}

func StorageClient(lc fx.Lifecycle, h host.Host, dataTransfer dtypes.ClientDataTransfer, discovery *discoveryimpl.Local,
	deals dtypes.ClientDatastore, scn storagemarket.StorageClientNode, accessor storagemarket.BlockstoreAccessor, j journal.Journal) (storagemarket.StorageClient, error) {
	// go-fil-markets protocol retries:
	// 1s, 5s, 25s, 2m5s, 5m x 11 ~= 1 hour
	marketsRetryParams := smnet.RetryParameters(time.Second, 5*time.Minute, 15, 5)
	net := smnet.NewFromLibp2pHost(h, marketsRetryParams)

	c, err := storageimpl.NewClient(net, dataTransfer, discovery, deals, scn, accessor, storageimpl.DealPollingInterval(time.Second), storageimpl.MaxTraversalLinks(config.MaxTraversalLinks))
	if err != nil {
		return nil, err
	}
	c.OnReady(marketevents.ReadyLogger("storage client"))
	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			c.SubscribeToEvents(marketevents.StorageClientLogger)

			evtType := j.RegisterEventType("markets/storage/client", "state_change")
			c.SubscribeToEvents(markets.StorageClientJournaler(j, evtType))

			return c.Start(ctx)
		},
		OnStop: func(context.Context) error {
			return c.Stop()
		},
	})
	return c, nil
}

// RetrievalClient creates a new retrieval client attached to the client blockstore
func RetrievalClient(forceOffChain bool) func(lc fx.Lifecycle, h host.Host, r repo.LockedRepo, dt dtypes.ClientDataTransfer, payAPI payapi.PaychAPI, resolver discovery.PeerResolver,
	ds dtypes.MetadataDS, chainAPI full.ChainAPI, stateAPI full.StateAPI, accessor *retrievaladapter.APIBlockstoreAccessor, j journal.Journal) (retrievalmarket.RetrievalClient, error) {
	return func(lc fx.Lifecycle, h host.Host, r repo.LockedRepo, dt dtypes.ClientDataTransfer, payAPI payapi.PaychAPI, resolver discovery.PeerResolver,
		ds dtypes.MetadataDS, chainAPI full.ChainAPI, stateAPI full.StateAPI, accessor *retrievaladapter.APIBlockstoreAccessor, j journal.Journal) (retrievalmarket.RetrievalClient, error) {
		adapter := retrievaladapter.NewRetrievalClientNode(forceOffChain, payAPI, chainAPI, stateAPI)
		network := rmnet.NewFromLibp2pHost(h)
		ds = namespace.Wrap(ds, datastore.NewKey("/retrievals/client"))
		client, err := retrievalimpl.NewClient(network, dt, adapter, resolver, ds, accessor)
		if err != nil {
			return nil, err
		}
		client.OnReady(marketevents.ReadyLogger("retrieval client"))
		lc.Append(fx.Hook{
			OnStart: func(ctx context.Context) error {
				client.SubscribeToEvents(marketevents.RetrievalClientLogger)

				evtType := j.RegisterEventType("markets/retrieval/client", "state_change")
				client.SubscribeToEvents(markets.RetrievalClientJournaler(j, evtType))

				return client.Start(ctx)
			},
		})
		return client, nil
	}
}
@ -1,14 +1,11 @@
package dtypes

import (
	"context"
	"time"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/storage/pipeline/sealiface"
@ -89,11 +86,3 @@ type SetExpectedSealDurationFunc func(time.Duration) error

// GetExpectedSealDurationFunc is a function which reads from miner
// to determine how long sealing is expected to take
type GetExpectedSealDurationFunc func() (time.Duration, error)

type SetMaxDealStartDelayFunc func(time.Duration) error
type GetMaxDealStartDelayFunc func() (time.Duration, error)

type StorageDealFilter func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error)
type RetrievalDealFilter func(ctx context.Context, deal retrievalmarket.ProviderDealState) (bool, string, error)

type RetrievalPricingFunc func(ctx context.Context, dealPricingParams retrievalmarket.PricingInput) (retrievalmarket.Ask, error)

@ -4,16 +4,8 @@ import (
	bserv "github.com/ipfs/boxo/blockservice"
	exchange "github.com/ipfs/boxo/exchange"
	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-graphsync"

	datatransfer "github.com/filecoin-project/go-data-transfer/v2"
	dtnet "github.com/filecoin-project/go-data-transfer/v2/network"
	"github.com/filecoin-project/go-fil-markets/piecestore"
	"github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation"
	"github.com/filecoin-project/go-statestore"

	"github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/node/repo/imports"
)

// MetadataDS stores metadata. By default it's namespaced under /metadata in
@ -67,26 +59,3 @@ type (

type ChainBitswap exchange.Interface
type ChainBlockService bserv.BlockService

type ClientImportMgr *imports.Manager
type ClientBlockstore blockstore.BasicBlockstore
type ClientDealStore *statestore.StateStore
type ClientRequestValidator *requestvalidation.UnifiedRequestValidator
type ClientDatastore datastore.Batching

type Graphsync graphsync.GraphExchange

// ClientDataTransfer is a data transfer manager for the client
type ClientDataTransfer datatransfer.Manager

type ProviderDealStore *statestore.StateStore
type ProviderPieceStore piecestore.PieceStore

type ProviderRequestValidator *requestvalidation.UnifiedRequestValidator

// ProviderDataTransfer is a data transfer manager for the provider
type ProviderDataTransfer datatransfer.Manager
type ProviderTransferNetwork dtnet.DataTransferNetwork
type ProviderTransport datatransfer.Transport
type StagingBlockstore blockstore.BasicBlockstore
type StagingGraphsync graphsync.GraphExchange

@ -1,101 +0,0 @@
package modules

import (
	"context"
	"time"

	"github.com/ipfs/go-graphsync"
	graphsyncimpl "github.com/ipfs/go-graphsync/impl"
	gsnet "github.com/ipfs/go-graphsync/network"
	"github.com/ipfs/go-graphsync/storeutil"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	"go.opencensus.io/stats"
	"go.uber.org/fx"

	"github.com/filecoin-project/lotus/metrics"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	"github.com/filecoin-project/lotus/node/modules/helpers"
	"github.com/filecoin-project/lotus/node/repo"
)

// Graphsync creates a graphsync instance from the given loader and storer
func Graphsync(parallelTransfersForStorage uint64, parallelTransfersForRetrieval uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, r repo.LockedRepo, clientBs dtypes.ClientBlockstore, chainBs dtypes.ExposedBlockstore, h host.Host) (dtypes.Graphsync, error) {
	return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, r repo.LockedRepo, clientBs dtypes.ClientBlockstore, chainBs dtypes.ExposedBlockstore, h host.Host) (dtypes.Graphsync, error) {
		graphsyncNetwork := gsnet.NewFromLibp2pHost(h)
		lsys := storeutil.LinkSystemForBlockstore(clientBs)

		gs := graphsyncimpl.New(helpers.LifecycleCtx(mctx, lc),
			graphsyncNetwork,
			lsys,
			graphsyncimpl.RejectAllRequestsByDefault(),
			graphsyncimpl.MaxInProgressIncomingRequests(parallelTransfersForStorage),
			graphsyncimpl.MaxInProgressOutgoingRequests(parallelTransfersForRetrieval),
			graphsyncimpl.MaxLinksPerIncomingRequests(config.MaxTraversalLinks),
			graphsyncimpl.MaxLinksPerOutgoingRequests(config.MaxTraversalLinks))
		chainLinkSystem := storeutil.LinkSystemForBlockstore(chainBs)
		err := gs.RegisterPersistenceOption("chainstore", chainLinkSystem)
		if err != nil {
			return nil, err
		}
		gs.RegisterIncomingRequestHook(func(p peer.ID, requestData graphsync.RequestData, hookActions graphsync.IncomingRequestHookActions) {
			_, has := requestData.Extension("chainsync")
			if has {
				// TODO: we should confirm the selector is a reasonable one before we validate
				// TODO: this code will get more complicated and should probably not live here eventually
				hookActions.ValidateRequest()
				hookActions.UsePersistenceOption("chainstore")
			}
		})
		gs.RegisterOutgoingRequestHook(func(p peer.ID, requestData graphsync.RequestData, hookActions graphsync.OutgoingRequestHookActions) {
			_, has := requestData.Extension("chainsync")
			if has {
				hookActions.UsePersistenceOption("chainstore")
			}
		})

		graphsyncStats(mctx, lc, gs)

		return gs, nil
	}
}

func graphsyncStats(mctx helpers.MetricsCtx, lc fx.Lifecycle, gs dtypes.Graphsync) {
	stopStats := make(chan struct{})
	lc.Append(fx.Hook{
		OnStart: func(context.Context) error {
			go func() {
				t := time.NewTicker(10 * time.Second)
				for {
					select {
					case <-t.C:
						st := gs.Stats()
						stats.Record(mctx, metrics.GraphsyncReceivingPeersCount.M(int64(st.OutgoingRequests.TotalPeers)))
						stats.Record(mctx, metrics.GraphsyncReceivingActiveCount.M(int64(st.OutgoingRequests.Active)))
						stats.Record(mctx, metrics.GraphsyncReceivingCountCount.M(int64(st.OutgoingRequests.Pending)))
						stats.Record(mctx, metrics.GraphsyncReceivingTotalMemoryAllocated.M(int64(st.IncomingResponses.TotalAllocatedAllPeers)))
						stats.Record(mctx, metrics.GraphsyncReceivingTotalPendingAllocations.M(int64(st.IncomingResponses.TotalPendingAllocations)))
						stats.Record(mctx, metrics.GraphsyncReceivingPeersPending.M(int64(st.IncomingResponses.NumPeersWithPendingAllocations)))
						stats.Record(mctx, metrics.GraphsyncSendingPeersCount.M(int64(st.IncomingRequests.TotalPeers)))
						stats.Record(mctx, metrics.GraphsyncSendingActiveCount.M(int64(st.IncomingRequests.Active)))
						stats.Record(mctx, metrics.GraphsyncSendingCountCount.M(int64(st.IncomingRequests.Pending)))
						stats.Record(mctx, metrics.GraphsyncSendingTotalMemoryAllocated.M(int64(st.OutgoingResponses.TotalAllocatedAllPeers)))
						stats.Record(mctx, metrics.GraphsyncSendingTotalPendingAllocations.M(int64(st.OutgoingResponses.TotalPendingAllocations)))
						stats.Record(mctx, metrics.GraphsyncSendingPeersPending.M(int64(st.OutgoingResponses.NumPeersWithPendingAllocations)))

					case <-stopStats:
						return
					}
				}
			}()

			return nil
		},
		OnStop: func(ctx context.Context) error {
			close(stopStats)
			return nil
		},
	})
}
@ -6,8 +6,6 @@ import (
	"strconv"
	"time"

	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/core/host"
@ -17,9 +15,6 @@ import (
	"go.uber.org/fx"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-fil-markets/discovery"
	discoveryimpl "github.com/filecoin-project/go-fil-markets/discovery/impl"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain"
	"github.com/filecoin-project/lotus/chain/beacon"
@ -34,7 +29,6 @@ import (
	"github.com/filecoin-project/lotus/journal"
	"github.com/filecoin-project/lotus/journal/fsjournal"
	"github.com/filecoin-project/lotus/lib/peermgr"
	marketevents "github.com/filecoin-project/lotus/markets/loggers"
	"github.com/filecoin-project/lotus/node/hello"
	"github.com/filecoin-project/lotus/node/impl/full"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
@ -224,24 +218,6 @@ func RelayIndexerMessages(lc fx.Lifecycle, ps *pubsub.PubSub, nn dtypes.NetworkN
	return nil
}

func NewLocalDiscovery(lc fx.Lifecycle, ds dtypes.MetadataDS) (*discoveryimpl.Local, error) {
	local, err := discoveryimpl.NewLocal(namespace.Wrap(ds, datastore.NewKey("/deals/local")))
	if err != nil {
		return nil, err
	}
	local.OnReady(marketevents.ReadyLogger("discovery"))
	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			return local.Start(ctx)
		},
	})
	return local, nil
}

func RetrievalResolver(l *discoveryimpl.Local) discovery.PeerResolver {
	return discoveryimpl.Multi(l)
}

type RandomBeaconParams struct {
	fx.In

@ -1,53 +1,28 @@
package modules

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/google/uuid"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	graphsync "github.com/ipfs/go-graphsync/impl"
	gsnet "github.com/ipfs/go-graphsync/network"
	"github.com/ipfs/go-graphsync/storeutil"
	provider "github.com/ipni/index-provider"
	"github.com/libp2p/go-libp2p/core/host"
	"go.uber.org/fx"
	"go.uber.org/multierr"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	dtimpl "github.com/filecoin-project/go-data-transfer/v2/impl"
	dtnet "github.com/filecoin-project/go-data-transfer/v2/network"
	dtgstransport "github.com/filecoin-project/go-data-transfer/v2/transport/graphsync"
	piecefilestore "github.com/filecoin-project/go-fil-markets/filestore"
	piecestoreimpl "github.com/filecoin-project/go-fil-markets/piecestore/impl"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl"
	rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network"
	"github.com/filecoin-project/go-fil-markets/shared"
	"github.com/filecoin-project/go-fil-markets/storagemarket"
	storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl"
	"github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask"
	smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network"
	"github.com/filecoin-project/go-jsonrpc/auth"
	"github.com/filecoin-project/go-paramfetch"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-statestore"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v0api"
	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/events"
@ -55,11 +30,6 @@ import (
	"github.com/filecoin-project/lotus/chain/gen/slashfilter"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/journal"
	"github.com/filecoin-project/lotus/markets"
	"github.com/filecoin-project/lotus/markets/dagstore"
	"github.com/filecoin-project/lotus/markets/idxprov"
	marketevents "github.com/filecoin-project/lotus/markets/loggers"
	"github.com/filecoin-project/lotus/markets/pricing"
	lotusminer "github.com/filecoin-project/lotus/miner"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
@ -332,163 +302,6 @@ func WindowPostScheduler(fc config.MinerFeeConfig, pc config.ProvingConfig) func
	}
}

func HandleRetrieval(host host.Host, lc fx.Lifecycle, m retrievalmarket.RetrievalProvider, j journal.Journal) {
	m.OnReady(marketevents.ReadyLogger("retrieval provider"))
	lc.Append(fx.Hook{

		OnStart: func(ctx context.Context) error {
			m.SubscribeToEvents(marketevents.RetrievalProviderLogger)

			evtType := j.RegisterEventType("markets/retrieval/provider", "state_change")
			m.SubscribeToEvents(markets.RetrievalProviderJournaler(j, evtType))

			return m.Start(ctx)
		},
		OnStop: func(context.Context) error {
			return m.Stop()
		},
	})
}

func HandleDeals(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, h storagemarket.StorageProvider, j journal.Journal) {
	ctx := helpers.LifecycleCtx(mctx, lc)
	h.OnReady(marketevents.ReadyLogger("storage provider"))
	lc.Append(fx.Hook{
		OnStart: func(context.Context) error {
			h.SubscribeToEvents(marketevents.StorageProviderLogger)

			evtType := j.RegisterEventType("markets/storage/provider", "state_change")
			h.SubscribeToEvents(markets.StorageProviderJournaler(j, evtType))

			return h.Start(ctx)
		},
		OnStop: func(context.Context) error {
			return h.Stop()
		},
	})
}

func HandleMigrateProviderFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, node api.FullNode, minerAddress dtypes.MinerAddress) {
	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			b, err := ds.Get(ctx, datastore.NewKey("/marketfunds/provider"))
			if err != nil {
				if xerrors.Is(err, datastore.ErrNotFound) {
					return nil
				}
				return err
			}

			var value abi.TokenAmount
			if err = value.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
				return err
			}
			ts, err := node.ChainHead(ctx)
			if err != nil {
				log.Errorf("provider funds migration - getting chain head: %v", err)
				return nil
			}

			mi, err := node.StateMinerInfo(ctx, address.Address(minerAddress), ts.Key())
			if err != nil {
				log.Errorf("provider funds migration - getting miner info %s: %v", minerAddress, err)
				return nil
			}

			_, err = node.MarketReserveFunds(ctx, mi.Worker, address.Address(minerAddress), value)
			if err != nil {
				log.Errorf("provider funds migration - reserving funds (wallet %s, addr %s, funds %d): %v",
					mi.Worker, minerAddress, value, err)
				return nil
			}

			return ds.Delete(ctx, datastore.NewKey("/marketfunds/provider"))
		},
	})
}

// NewProviderTransferNetwork sets up the libp2p protocol networking for data transfer
func NewProviderTransferNetwork(h host.Host) dtypes.ProviderTransferNetwork {
	return dtnet.NewFromLibp2pHost(h)
}

// NewProviderTransport sets up a data transfer transport over graphsync
func NewProviderTransport(h host.Host, gs dtypes.StagingGraphsync) dtypes.ProviderTransport {
	return dtgstransport.NewTransport(h.ID(), gs)
}

// NewProviderDataTransfer returns a data transfer manager
func NewProviderDataTransfer(lc fx.Lifecycle, net dtypes.ProviderTransferNetwork, transport dtypes.ProviderTransport, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ProviderDataTransfer, error) {
	dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/provider/transfers"))

	dt, err := dtimpl.NewDataTransfer(dtDs, net, transport)
	if err != nil {
		return nil, err
	}

	dt.OnReady(marketevents.ReadyLogger("provider data transfer"))
	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			dt.SubscribeToEvents(marketevents.DataTransferLogger)
			return dt.Start(ctx)
		},
		OnStop: func(ctx context.Context) error {
			return dt.Stop(ctx)
		},
	})
	return dt, nil
}

// NewProviderPieceStore creates a statestore for storing metadata about pieces
// shared by the storage and retrieval providers
func NewProviderPieceStore(lc fx.Lifecycle, ds dtypes.MetadataDS) (dtypes.ProviderPieceStore, error) {
	ps, err := piecestoreimpl.NewPieceStore(namespace.Wrap(ds, datastore.NewKey("/storagemarket")))
	if err != nil {
		return nil, err
	}
	ps.OnReady(marketevents.ReadyLogger("piecestore"))
	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			return ps.Start(ctx)
		},
	})
	return ps, nil
}

// StagingBlockstore creates a blockstore for staging blocks for a miner
// in a storage deal, prior to sealing
func StagingBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.StagingBlockstore, error) {
	ctx := helpers.LifecycleCtx(mctx, lc)
	stagingds, err := r.Datastore(ctx, "/staging")
	if err != nil {
		return nil, err
	}

	return blockstore.FromDatastore(stagingds), nil
}

// StagingGraphsync creates a graphsync instance which reads and writes blocks
// to the StagingBlockstore
func StagingGraphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePerPeer uint64, parallelTransfersForRetrieval uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync {
	return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync {
		graphsyncNetwork := gsnet.NewFromLibp2pHost(h)
		lsys := storeutil.LinkSystemForBlockstore(ibs)
		gs := graphsync.New(helpers.LifecycleCtx(mctx, lc),
			graphsyncNetwork,
			lsys,
			graphsync.RejectAllRequestsByDefault(),
			graphsync.MaxInProgressIncomingRequests(parallelTransfersForRetrieval),
			graphsync.MaxInProgressIncomingRequestsPerPeer(parallelTransfersForStoragePerPeer),
			graphsync.MaxInProgressOutgoingRequests(parallelTransfersForStorage),
			graphsync.MaxLinksPerIncomingRequests(config.MaxTraversalLinks),
			graphsync.MaxLinksPerOutgoingRequests(config.MaxTraversalLinks))

		graphsyncStats(mctx, lc, gs)

		return gs
	}
}

func SetupBlockProducer(lc fx.Lifecycle, ds dtypes.MetadataDS, api v1api.FullNode, epp gen.WinningPoStProver, sf *slashfilter.SlashFilter, j journal.Journal) (*lotusminer.Miner, error) {
	minerAddr, err := minerAddrFromDS(ds)
	if err != nil {
@ -512,273 +325,6 @@ func SetupBlockProducer(lc fx.Lifecycle, ds dtypes.MetadataDS, api v1api.FullNod
	return m, nil
}

func NewStorageAsk(ctx helpers.MetricsCtx, fapi v1api.FullNode, ds dtypes.MetadataDS, minerAddress dtypes.MinerAddress, spn storagemarket.StorageProviderNode) (*storedask.StoredAsk, error) {

	mi, err := fapi.StateMinerInfo(ctx, address.Address(minerAddress), types.EmptyTSK)
	if err != nil {
		return nil, err
	}

	providerDs := namespace.Wrap(ds, datastore.NewKey("/deals/provider"))
	// legacy: this key was placed here by mistake -- so we move the legacy key if need be
	err = shared.MoveKey(providerDs, "/latest-ask", "/storage-ask/latest")
	if err != nil {
		return nil, err
	}
	return storedask.NewStoredAsk(namespace.Wrap(providerDs, datastore.NewKey("/storage-ask")), datastore.NewKey("latest"), spn, address.Address(minerAddress),
		storagemarket.MaxPieceSize(abi.PaddedPieceSize(mi.SectorSize)))
}

func BasicDealFilter(cfg config.DealmakingConfig, user dtypes.StorageDealFilter) func(onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc,
	offlineOk dtypes.ConsiderOfflineStorageDealsConfigFunc,
	verifiedOk dtypes.ConsiderVerifiedStorageDealsConfigFunc,
	unverifiedOk dtypes.ConsiderUnverifiedStorageDealsConfigFunc,
	blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc,
	expectedSealTimeFunc dtypes.GetExpectedSealDurationFunc,
	startDelay dtypes.GetMaxDealStartDelayFunc,
	spn storagemarket.StorageProviderNode,
	r repo.LockedRepo,
) dtypes.StorageDealFilter {
	return func(onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc,
		offlineOk dtypes.ConsiderOfflineStorageDealsConfigFunc,
		verifiedOk dtypes.ConsiderVerifiedStorageDealsConfigFunc,
		unverifiedOk dtypes.ConsiderUnverifiedStorageDealsConfigFunc,
		blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc,
		expectedSealTimeFunc dtypes.GetExpectedSealDurationFunc,
		startDelay dtypes.GetMaxDealStartDelayFunc,
		spn storagemarket.StorageProviderNode,
		r repo.LockedRepo,
	) dtypes.StorageDealFilter {

		return func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) {
			b, err := onlineOk()
			if err != nil {
				return false, "miner error", err
			}

			if deal.Ref != nil && deal.Ref.TransferType != storagemarket.TTManual && !b {
				log.Warnf("online storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String())
				return false, "miner is not considering online storage deals", nil
			}

			b, err = offlineOk()
			if err != nil {
				return false, "miner error", err
			}

			if deal.Ref != nil && deal.Ref.TransferType == storagemarket.TTManual && !b {
				log.Warnf("offline storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String())
				return false, "miner is not accepting offline storage deals", nil
			}

			b, err = verifiedOk()
			if err != nil {
				return false, "miner error", err
			}

			if deal.Proposal.VerifiedDeal && !b {
				log.Warnf("verified storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String())
				return false, "miner is not accepting verified storage deals", nil
			}

			b, err = unverifiedOk()
			if err != nil {
				return false, "miner error", err
			}

			if !deal.Proposal.VerifiedDeal && !b {
				log.Warnf("unverified storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String())
				return false, "miner is not accepting unverified storage deals", nil
			}

			blocklist, err := blocklistFunc()
			if err != nil {
				return false, "miner error", err
			}

			for idx := range blocklist {
				if deal.Proposal.PieceCID.Equals(blocklist[idx]) {
					log.Warnf("piece CID in proposal %s is blocklisted; rejecting storage deal proposal from client: %s", deal.Proposal.PieceCID, deal.Client.String())
					return false, fmt.Sprintf("miner has blocklisted piece CID %s", deal.Proposal.PieceCID), nil
				}
			}

			sealDuration, err := expectedSealTimeFunc()
			if err != nil {
				return false, "miner error", err
			}

			sealEpochs := sealDuration / (time.Duration(build.BlockDelaySecs) * time.Second)
			_, ht, err := spn.GetChainHead(ctx)
			if err != nil {
				return false, "failed to get chain head", err
			}
			earliest := abi.ChainEpoch(sealEpochs) + ht
			if deal.Proposal.StartEpoch < earliest {
				log.Warnw("proposed deal would start before sealing can be completed; rejecting storage deal proposal from client", "piece_cid", deal.Proposal.PieceCID, "client", deal.Client.String(), "seal_duration", sealDuration, "earliest", earliest, "curepoch", ht)
				return false, fmt.Sprintf("cannot seal a sector before %s", deal.Proposal.StartEpoch), nil
			}

			sd, err := startDelay()
			if err != nil {
				return false, "miner error", err
			}

			dir := filepath.Join(r.Path(), StagingAreaDirName)
			diskUsageBytes, err := r.DiskUsage(dir)
			if err != nil {
				return false, "miner error", err
			}

			if cfg.MaxStagingDealsBytes != 0 && diskUsageBytes >= cfg.MaxStagingDealsBytes {
				log.Errorw("proposed deal rejected because there are too many deals in the staging area at the moment", "MaxStagingDealsBytes", cfg.MaxStagingDealsBytes, "DiskUsageBytes", diskUsageBytes)
				return false, "cannot accept deal as miner is overloaded at the moment - there are too many staging deals being processed", nil
			}

			// Reject if it's more than 7 days in the future
			// TODO: read from cfg
			maxStartEpoch := earliest + abi.ChainEpoch(uint64(sd.Seconds())/build.BlockDelaySecs)
			if deal.Proposal.StartEpoch > maxStartEpoch {
				return false, fmt.Sprintf("deal start epoch is too far in the future: %s > %s", deal.Proposal.StartEpoch, maxStartEpoch), nil
			}

			if user != nil {
				return user(ctx, deal)
			}

			return true, "", nil
		}
	}
}
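Note that BasicDealFilter only consults the user-supplied filter after every built-in check passes, so an operator can layer policy on top without re-implementing the basics. A minimal sketch of such a user filter in the shape dtypes.StorageDealFilter expects (the size cap and helper name are illustrative; the types come from the pre-removal go-fil-markets module):

package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/go-state-types/abi"
)

// rejectLargePieces builds a user filter with the same signature as
// dtypes.StorageDealFilter; BasicDealFilter calls it last, so it only sees
// deals that already passed the built-in checks above.
func rejectLargePieces(max abi.PaddedPieceSize) func(context.Context, storagemarket.MinerDeal) (bool, string, error) {
	return func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) {
		if deal.Proposal.PieceSize > max {
			return false, fmt.Sprintf("piece size %d exceeds the configured cap %d", deal.Proposal.PieceSize, max), nil
		}
		return true, "", nil // accept; an empty string means no rejection reason
	}
}

func main() {
	filter := rejectLargePieces(32 << 30) // cap pieces at 32 GiB
	_ = filter
}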

func StorageProvider(minerAddress dtypes.MinerAddress,
	storedAsk *storedask.StoredAsk,
	h host.Host, ds dtypes.MetadataDS,
	r repo.LockedRepo,
	pieceStore dtypes.ProviderPieceStore,
	indexer provider.Interface,
	dataTransfer dtypes.ProviderDataTransfer,
	spn storagemarket.StorageProviderNode,
	df dtypes.StorageDealFilter,
	dsw *dagstore.Wrapper,
	meshCreator idxprov.MeshCreator,
) (storagemarket.StorageProvider, error) {
	net := smnet.NewFromLibp2pHost(h)

	dir := filepath.Join(r.Path(), StagingAreaDirName)

	// migrate temporary files that were created directly under the repo, by
	// moving them to the new directory and symlinking them.
	oldDir := r.Path()
	if err := migrateDealStaging(oldDir, dir); err != nil {
		return nil, xerrors.Errorf("failed to make deal staging directory %w", err)
	}

	store, err := piecefilestore.NewLocalFileStore(piecefilestore.OsPath(dir))
	if err != nil {
		return nil, err
	}

	opt := storageimpl.CustomDealDecisionLogic(storageimpl.DealDeciderFunc(df))

	return storageimpl.NewProvider(
		net,
		namespace.Wrap(ds, datastore.NewKey("/deals/provider")),
		store,
		dsw,
		indexer,
		pieceStore,
		dataTransfer,
		spn,
		address.Address(minerAddress),
		storedAsk,
		meshCreator,
		opt,
	)
}

func RetrievalDealFilter(userFilter dtypes.RetrievalDealFilter) func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc,
	offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalDealFilter {
	return func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc,
		offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalDealFilter {
		return func(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) {
			b, err := onlineOk()
			if err != nil {
				return false, "miner error", err
			}

			if !b {
				log.Warn("online retrieval deal consideration disabled; rejecting retrieval deal proposal from client")
				return false, "miner is not accepting online retrieval deals", nil
			}

			b, err = offlineOk()
			if err != nil {
				return false, "miner error", err
			}

			if !b {
				log.Info("offline retrieval has not been implemented yet")
			}

			if userFilter != nil {
				return userFilter(ctx, state)
			}

			return true, "", nil
		}
	}
}

func RetrievalNetwork(h host.Host) rmnet.RetrievalMarketNetwork {
	return rmnet.NewFromLibp2pHost(h)
}

// RetrievalPricingFunc configures the pricing function to use for retrieval deals.
func RetrievalPricingFunc(cfg config.DealmakingConfig) func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc,
	_ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc {

	return func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc,
		_ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc {
		if cfg.RetrievalPricing.Strategy == config.RetrievalPricingExternalMode {
			return pricing.ExternalRetrievalPricingFunc(cfg.RetrievalPricing.External.Path)
		}

		return retrievalimpl.DefaultPricingFunc(cfg.RetrievalPricing.Default.VerifiedDealsFreeTransfer)
	}
}

// RetrievalProvider creates a new retrieval provider attached to the provider blockstore
func RetrievalProvider(
	maddr dtypes.MinerAddress,
	adapter retrievalmarket.RetrievalProviderNode,
	sa retrievalmarket.SectorAccessor,
	netwk rmnet.RetrievalMarketNetwork,
	ds dtypes.MetadataDS,
	pieceStore dtypes.ProviderPieceStore,
	dt dtypes.ProviderDataTransfer,
	pricingFnc dtypes.RetrievalPricingFunc,
	userFilter dtypes.RetrievalDealFilter,
	dagStore *dagstore.Wrapper,
) (retrievalmarket.RetrievalProvider, error) {
	opt := retrievalimpl.DealDeciderOpt(retrievalimpl.DealDecider(userFilter))

	retrievalmarket.DefaultPricePerByte = big.Zero() // todo: for whatever reason this is a global var in markets

	return retrievalimpl.NewProvider(
		address.Address(maddr),
		adapter,
		sa,
		netwk,
		pieceStore,
		dagStore,
		dt,
		namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")),
		retrievalimpl.RetrievalPricingFunc(pricingFnc),
		opt,
	)
}

var WorkerCallsPrefix = datastore.NewKey("/worker/calls")
var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls")

@ -838,153 +384,6 @@ func StorageAuthWithURL(apiInfo string) interface{} {
	}
}

func NewConsiderOnlineStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOnlineStorageDealsConfigFunc, error) {
	return func() (out bool, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = cfg.ConsiderOnlineStorageDeals
		})
		return
	}, nil
}

func NewSetConsideringOnlineStorageDealsFunc(r repo.LockedRepo) (dtypes.SetConsiderOnlineStorageDealsConfigFunc, error) {
	return func(b bool) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.ConsiderOnlineStorageDeals = b
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewConsiderOnlineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOnlineRetrievalDealsConfigFunc, error) {
	return func() (out bool, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = cfg.ConsiderOnlineRetrievalDeals
		})
		return
	}, nil
}

func NewSetConsiderOnlineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.SetConsiderOnlineRetrievalDealsConfigFunc, error) {
	return func(b bool) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.ConsiderOnlineRetrievalDeals = b
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewStorageDealPieceCidBlocklistConfigFunc(r repo.LockedRepo) (dtypes.StorageDealPieceCidBlocklistConfigFunc, error) {
	return func() (out []cid.Cid, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = cfg.PieceCidBlocklist
		})
		return
	}, nil
}

func NewSetStorageDealPieceCidBlocklistConfigFunc(r repo.LockedRepo) (dtypes.SetStorageDealPieceCidBlocklistConfigFunc, error) {
	return func(blocklist []cid.Cid) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.PieceCidBlocklist = blocklist
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewConsiderOfflineStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOfflineStorageDealsConfigFunc, error) {
	return func() (out bool, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = cfg.ConsiderOfflineStorageDeals
		})
		return
	}, nil
}

func NewSetConsideringOfflineStorageDealsFunc(r repo.LockedRepo) (dtypes.SetConsiderOfflineStorageDealsConfigFunc, error) {
	return func(b bool) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.ConsiderOfflineStorageDeals = b
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewConsiderOfflineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOfflineRetrievalDealsConfigFunc, error) {
	return func() (out bool, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = cfg.ConsiderOfflineRetrievalDeals
		})
		return
	}, nil
}

func NewSetConsiderOfflineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.SetConsiderOfflineRetrievalDealsConfigFunc, error) {
	return func(b bool) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.ConsiderOfflineRetrievalDeals = b
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewConsiderVerifiedStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderVerifiedStorageDealsConfigFunc, error) {
	return func() (out bool, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = cfg.ConsiderVerifiedStorageDeals
		})
		return
	}, nil
}

func NewSetConsideringVerifiedStorageDealsFunc(r repo.LockedRepo) (dtypes.SetConsiderVerifiedStorageDealsConfigFunc, error) {
	return func(b bool) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.ConsiderVerifiedStorageDeals = b
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewConsiderUnverifiedStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderUnverifiedStorageDealsConfigFunc, error) {
	return func() (out bool, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = cfg.ConsiderUnverifiedStorageDeals
		})
		return
	}, nil
}

func NewSetConsideringUnverifiedStorageDealsFunc(r repo.LockedRepo) (dtypes.SetConsiderUnverifiedStorageDealsConfigFunc, error) {
	return func(b bool) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.ConsiderUnverifiedStorageDeals = b
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error) {
	return func(cfg sealiface.Config) (err error) {
		err = mutateSealingCfg(r, func(c config.SealingConfiger) {
@ -1092,48 +491,6 @@ func NewGetSealConfigFunc(r repo.LockedRepo) (dtypes.GetSealingConfigFunc, error
	}, nil
}

func NewSetExpectedSealDurationFunc(r repo.LockedRepo) (dtypes.SetExpectedSealDurationFunc, error) {
	return func(delay time.Duration) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.ExpectedSealDuration = config.Duration(delay)
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewGetExpectedSealDurationFunc(r repo.LockedRepo) (dtypes.GetExpectedSealDurationFunc, error) {
	return func() (out time.Duration, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = time.Duration(cfg.ExpectedSealDuration)
		})
		return
	}, nil
}

func NewSetMaxDealStartDelayFunc(r repo.LockedRepo) (dtypes.SetMaxDealStartDelayFunc, error) {
	return func(delay time.Duration) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.MaxDealStartDelay = config.Duration(delay)
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewGetMaxDealStartDelayFunc(r repo.LockedRepo) (dtypes.GetMaxDealStartDelayFunc, error) {
	return func() (out time.Duration, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = time.Duration(cfg.MaxDealStartDelay)
		})
		return
	}, nil
}

func readSealingCfg(r repo.LockedRepo, accessor func(config.DealmakingConfiger, config.SealingConfiger)) error {
	raw, err := r.Config()
	if err != nil {
@ -1171,91 +528,6 @@ func mutateSealingCfg(r repo.LockedRepo, mutator func(config.SealingConfiger)) e
	return multierr.Combine(typeErr, setConfigErr)
}

func readDealmakingCfg(r repo.LockedRepo, accessor func(config.DealmakingConfiger)) error {
	raw, err := r.Config()
	if err != nil {
		return err
	}

	cfg, ok := raw.(config.DealmakingConfiger)
	if !ok {
		return xerrors.New("expected config with dealmaking config trait")
	}

	accessor(cfg)

	return nil
}

func mutateDealmakingCfg(r repo.LockedRepo, mutator func(config.DealmakingConfiger)) error {
	var typeErr error

	setConfigErr := r.SetConfig(func(raw interface{}) {
		cfg, ok := raw.(config.DealmakingConfiger)
		if !ok {
			typeErr = errors.New("expected config with dealmaking config trait")
			return
		}

		mutator(cfg)
	})

	return multierr.Combine(typeErr, setConfigErr)
}
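All of the New*ConfigFunc constructors above funnel through one pattern: the repo hands back an opaque config value, and the helper type-asserts it against a narrow "trait" interface (config.DealmakingConfiger), so the same machinery works for any config struct that exposes that section. A minimal self-contained sketch of the pattern (the Configer interface and the setConfig callback are illustrative stand-ins, not lotus APIs; errors.Join plays the role of multierr.Combine):

package main

import (
	"errors"
	"fmt"
)

// Configer is a stand-in for a config "trait" like config.DealmakingConfiger:
// anything exposing a getter/setter pair for one section of the config.
type Configer interface {
	GetMaxDeals() int
	SetMaxDeals(int)
}

type cfg struct{ maxDeals int }

func (c *cfg) GetMaxDeals() int  { return c.maxDeals }
func (c *cfg) SetMaxDeals(n int) { c.maxDeals = n }

// mutateCfg mirrors mutateDealmakingCfg: the type assertion happens inside
// the write callback, and an assertion failure is reported alongside any
// error from persisting the config.
func mutateCfg(setConfig func(func(raw interface{})) error, mutator func(Configer)) error {
	var typeErr error
	writeErr := setConfig(func(raw interface{}) {
		c, ok := raw.(Configer)
		if !ok {
			typeErr = errors.New("expected config with dealmaking config trait")
			return
		}
		mutator(c)
	})
	return errors.Join(typeErr, writeErr)
}

func main() {
	current := &cfg{maxDeals: 8}
	// setConfig stands in for repo.LockedRepo.SetConfig: hand the raw config
	// to the callback, then persist it (here: just print it).
	setConfig := func(mutate func(raw interface{})) error {
		mutate(current)
		fmt.Printf("persisted config: %+v\n", *current)
		return nil
	}
	if err := mutateCfg(setConfig, func(c Configer) { c.SetMaxDeals(16) }); err != nil {
		panic(err)
	}
}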

func migrateDealStaging(oldPath, newPath string) error {
	dirInfo, err := os.Stat(newPath)
	if err == nil {
		if !dirInfo.IsDir() {
			return xerrors.Errorf("%s is not a directory", newPath)
		}
		// The newPath already exists, so the migration below has already occurred.
		return nil
	}

	// if the directory doesn't exist, create it
	if os.IsNotExist(err) {
		if err := os.MkdirAll(newPath, 0755); err != nil {
			return xerrors.Errorf("failed to mk directory %s for deal staging: %w", newPath, err)
		}
	} else { // if we failed for other reasons, abort.
		return err
	}

	// this is the first time we created the directory: symlink all staged deals into it ("migration").
	// get a list of files in the miner repo
	dirEntries, err := os.ReadDir(oldPath)
	if err != nil {
		return xerrors.Errorf("failed to list directory %s for deal staging: %w", oldPath, err)
	}

	for _, entry := range dirEntries {
		// ignore directories, they are not the deals.
		if entry.IsDir() {
			continue
		}
		// the FileStore from go-fil-markets creates temporary staged deal files with the pattern "fstmp"
		// https://github.com/filecoin-project/go-fil-markets/blob/00ff81e477d846ac0cb58a0c7d1c2e9afb5ee1db/filestore/filestore.go#L69
		name := entry.Name()
		if strings.Contains(name, "fstmp") {
			// from the miner repo
			oldPath := filepath.Join(oldPath, name)
			// to its subdir "deal-staging"
			newPath := filepath.Join(newPath, name)
			// create a symbolic link in the new deal staging directory to preserve existing staged deals.
			// all future staged deals will be created here.
			if err := os.Rename(oldPath, newPath); err != nil {
				return xerrors.Errorf("failed to move %s to %s: %w", oldPath, newPath, err)
			}
			if err := os.Symlink(newPath, oldPath); err != nil {
				return xerrors.Errorf("failed to symlink %s to %s: %w", oldPath, newPath, err)
			}
			log.Infow("symlinked staged deal", "from", oldPath, "to", newPath)
		}
	}
	return nil
}

func ExtractEnabledMinerSubsystems(cfg config.MinerSubsystemConfig) (res api.MinerSubsystems) {
	if cfg.EnableMining {
		res = append(res, api.SubsystemMining)
@ -1266,8 +538,6 @@ func ExtractEnabledMinerSubsystems(cfg config.MinerSubsystemConfig) (res api.Min
	if cfg.EnableSectorStorage {
		res = append(res, api.SubsystemSectorStorage)
	}
	if cfg.EnableMarkets {
		res = append(res, api.SubsystemMarkets)
	}

	return res
}

@ -1,94 +0,0 @@
package modules

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strconv"

	"github.com/libp2p/go-libp2p/core/host"
	"go.uber.org/fx"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/dagstore"

	mdagstore "github.com/filecoin-project/lotus/markets/dagstore"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
	"github.com/filecoin-project/lotus/node/repo"
)

const (
	EnvDAGStoreCopyConcurrency = "LOTUS_DAGSTORE_COPY_CONCURRENCY"
	DefaultDAGStoreDir         = "dagstore"
)

// NewMinerAPI creates a new MinerAPI adaptor for the dagstore mounts.
func NewMinerAPI(cfg config.DAGStoreConfig) func(fx.Lifecycle, repo.LockedRepo, dtypes.ProviderPieceStore, mdagstore.SectorAccessor) (mdagstore.MinerAPI, error) {
	return func(lc fx.Lifecycle, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, sa mdagstore.SectorAccessor) (mdagstore.MinerAPI, error) {
		// caps the number of concurrent calls to the storage, so that we don't
		// spam it during heavy processes like bulk migration.
		if v, ok := os.LookupEnv("LOTUS_DAGSTORE_MOUNT_CONCURRENCY"); ok {
			concurrency, err := strconv.Atoi(v)
			if err == nil {
				cfg.MaxConcurrencyStorageCalls = concurrency
			}
		}

		mountApi := mdagstore.NewMinerAPI(pieceStore, sa, cfg.MaxConcurrencyStorageCalls, cfg.MaxConcurrentUnseals)
		ready := make(chan error, 1)
		pieceStore.OnReady(func(err error) {
			ready <- err
		})
		lc.Append(fx.Hook{
			OnStart: func(ctx context.Context) error {
				if err := <-ready; err != nil {
					return fmt.Errorf("aborting dagstore start; piecestore failed to start: %s", err)
				}
				return mountApi.Start(ctx)
			},
			OnStop: func(context.Context) error {
				return nil
			},
		})

		return mountApi, nil
	}
}

// DAGStore constructs a DAG store using the supplied minerAPI, and the
// user configuration. It returns both the DAGStore and the Wrapper suitable for
// passing to markets.
func DAGStore(cfg config.DAGStoreConfig) func(lc fx.Lifecycle, r repo.LockedRepo, minerAPI mdagstore.MinerAPI, h host.Host) (*dagstore.DAGStore, *mdagstore.Wrapper, error) {
	return func(lc fx.Lifecycle, r repo.LockedRepo, minerAPI mdagstore.MinerAPI, h host.Host) (*dagstore.DAGStore, *mdagstore.Wrapper, error) {
		// fall back to default root directory if not explicitly set in the config.
		if cfg.RootDir == "" {
			cfg.RootDir = filepath.Join(r.Path(), DefaultDAGStoreDir)
		}

		v, ok := os.LookupEnv(EnvDAGStoreCopyConcurrency)
		if ok {
			concurrency, err := strconv.Atoi(v)
			if err == nil {
				cfg.MaxConcurrentReadyFetches = concurrency
			}
		}

		dagst, w, err := mdagstore.NewDAGStore(cfg, minerAPI, h)
		if err != nil {
			return nil, nil, xerrors.Errorf("failed to create DAG store: %w", err)
		}

		lc.Append(fx.Hook{
			OnStart: func(ctx context.Context) error {
				return w.Start(ctx)
			},
			OnStop: func(context.Context) error {
				return w.Close()
			},
		})

		return dagst, w, nil
	}
}

@ -1,117 +0,0 @@
package modules

import (
	"context"

	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	provider "github.com/ipni/index-provider"
	"github.com/ipni/index-provider/engine"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/host"
	"go.uber.org/fx"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

type IdxProv struct {
	fx.In

	fx.Lifecycle
	Datastore dtypes.MetadataDS
}

func IndexProvider(cfg config.IndexProviderConfig) func(params IdxProv, marketHost host.Host, dt dtypes.ProviderDataTransfer, maddr dtypes.MinerAddress, ps *pubsub.PubSub, nn dtypes.NetworkName) (provider.Interface, error) {
	return func(args IdxProv, marketHost host.Host, dt dtypes.ProviderDataTransfer, maddr dtypes.MinerAddress, ps *pubsub.PubSub, nn dtypes.NetworkName) (provider.Interface, error) {
		topicName := cfg.TopicName
		// If indexer topic name is left empty, infer it from the network name.
		if topicName == "" {
			// Use the same mechanism as the Dependency Injection (DI) to construct the topic name,
			// so that we are certain it is consistent with the name allowed by the subscription
			// filter.
			//
			// See: lp2p.GossipSub.
			topicName = build.IndexerIngestTopic(nn)
			log.Debugw("Inferred indexer topic from network name", "topic", topicName)
		}

		ipds := namespace.Wrap(args.Datastore, datastore.NewKey("/index-provider"))
		addrs := marketHost.Addrs()
		addrsString := make([]string, 0, len(addrs))
		for _, addr := range addrs {
			addrsString = append(addrsString, addr.String())
		}
		var opts = []engine.Option{
			engine.WithDatastore(ipds),
			engine.WithHost(marketHost),
			engine.WithRetrievalAddrs(addrsString...),
			engine.WithEntriesCacheCapacity(cfg.EntriesCacheCapacity),
			engine.WithChainedEntries(cfg.EntriesChunkSize),
			engine.WithTopicName(topicName),
			engine.WithPurgeCacheOnStart(cfg.PurgeCacheOnStart),
		}

		llog := log.With(
			"idxProvEnabled", cfg.Enable,
			"pid", marketHost.ID(),
			"topic", topicName,
			"retAddrs", marketHost.Addrs())
		// If announcements to the network are enabled, then set options for datatransfer publisher.
		if cfg.Enable {
			// Join the indexer topic using the market's pubsub instance. Otherwise, the provider
			// engine would create its own instance of pubsub down the line in dagsync, which has
			// no validators by default.
			t, err := ps.Join(topicName)
			if err != nil {
				llog.Errorw("Failed to join indexer topic", "err", err)
				return nil, xerrors.Errorf("joining indexer topic %s: %w", topicName, err)
			}

			// Get the miner ID and set as extra gossip data.
			// The extra data is required by the lotus-specific index-provider gossip message validators.
			ma := address.Address(maddr)
			opts = append(opts,
				engine.WithPublisherKind(engine.DataTransferPublisher),
				engine.WithDataTransfer(dt),
				engine.WithExtraGossipData(ma.Bytes()),
				engine.WithTopic(t),
			)
			llog = llog.With("extraGossipData", ma, "publisher", "data-transfer")
		} else {
			opts = append(opts, engine.WithPublisherKind(engine.NoPublisher))
			llog = llog.With("publisher", "none")
		}

		// Instantiate the index provider engine.
		e, err := engine.New(opts...)
		if err != nil {
			return nil, xerrors.Errorf("creating indexer provider engine: %w", err)
		}
		llog.Info("Instantiated index provider engine")

		args.Lifecycle.Append(fx.Hook{
			OnStart: func(ctx context.Context) error {
				// Note that the OnStart context is cancelled after startup. Its use in e.Start is
				// to start up gossipsub publishers and restore cache, all of which are completed
				// before e.Start returns. Therefore, it is fine to reuse the given context.
				if err := e.Start(ctx); err != nil {
					return xerrors.Errorf("starting indexer provider engine: %w", err)
				}
				log.Infof("Started index provider engine")
				return nil
			},
			OnStop: func(_ context.Context) error {
				if err := e.Shutdown(); err != nil {
					return xerrors.Errorf("shutting down indexer provider engine: %w", err)
				}
				return nil
			},
		})
		return e, nil
	}
}
@ -1,99 +0,0 @@
package modules_test

import (
	"context"
	"strings"
	"testing"
	"time"

	"github.com/ipfs/go-datastore"
	provider "github.com/ipni/index-provider"
	"github.com/libp2p/go-libp2p"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/stretchr/testify/require"
	"go.uber.org/fx"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/node/modules"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

func Test_IndexProviderTopic(t *testing.T) {
	tests := []struct {
		name                 string
		givenAllowedTopics   []string
		givenConfiguredTopic string
		givenNetworkName     dtypes.NetworkName
		wantErr              string
	}{
		{
			name:                 "Joins configured topic when allowed",
			givenAllowedTopics:   []string{"fish"},
			givenConfiguredTopic: "fish",
		},
		{
			name:               "Joins topic inferred from network name when allowed",
			givenAllowedTopics: []string{"/indexer/ingest/fish"},
			givenNetworkName:   "fish",
		},
		{
			name:                 "Fails to join configured topic when disallowed",
			givenAllowedTopics:   []string{"/indexer/ingest/fish"},
			givenConfiguredTopic: "lobster",
			wantErr:              "joining indexer topic lobster: topic is not allowed by the subscription filter",
		},
		{
			name:               "Fails to join topic inferred from network name when disallowed",
			givenAllowedTopics: []string{"/indexer/ingest/fish"},
			givenNetworkName:   "lobster",
			wantErr:            "joining indexer topic /indexer/ingest/lobster: topic is not allowed by the subscription filter",
		},
	}

	for _, test := range tests {
		test := test
		t.Run(test.name, func(t *testing.T) {
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			defer cancel()

			h, err := libp2p.New()
			require.NoError(t, err)
			defer func() {
				require.NoError(t, h.Close())
			}()

			filter := pubsub.WithSubscriptionFilter(pubsub.NewAllowlistSubscriptionFilter(test.givenAllowedTopics...))
			ps, err := pubsub.NewGossipSub(ctx, h, filter)
			require.NoError(t, err)

			app := fx.New(
				fx.Provide(
					func() host.Host { return h },
					func() dtypes.NetworkName { return test.givenNetworkName },
					func() dtypes.MinerAddress { return dtypes.MinerAddress(address.TestAddress) },
					func() dtypes.ProviderDataTransfer { return nil },
					func() *pubsub.PubSub { return ps },
					func() dtypes.MetadataDS { return datastore.NewMapDatastore() },
					modules.IndexProvider(config.IndexProviderConfig{
						Enable:           true,
						TopicName:        test.givenConfiguredTopic,
						EntriesChunkSize: 16384,
					}),
				),
				fx.Invoke(func(p provider.Interface) {}),
			)
			err = app.Start(ctx)

			if test.wantErr == "" {
				require.NoError(t, err)
				err = app.Stop(ctx)
				require.NoError(t, err)
			} else {
				require.True(t, strings.HasSuffix(err.Error(), test.wantErr))
			}
		})
	}
}
@ -1,275 +0,0 @@
package imports

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"strconv"

	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	"github.com/ipfs/go-datastore/query"
	logging "github.com/ipfs/go-log/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-fil-markets/shared"
)

var log = logging.Logger("importmgr")

type ID uint64

func (id ID) dsKey() datastore.Key {
	return datastore.NewKey(fmt.Sprintf("%d", id))
}

type Manager struct {
	ds      datastore.Batching
	rootDir string
	counter *shared.TimeCounter
}

type LabelKey = string
type LabelValue = string

const (
	CAROwnerImportMgr = "importmgr"
	CAROwnerUser      = "user"
)

const (
	LSource   = LabelKey("source")    // Function which created the import
	LRootCid  = LabelKey("root")      // Root CID
	LFileName = LabelKey("filename")  // Local file path of the source file.
	LCARPath  = LabelKey("car_path")  // Path of the CARv2 file containing the imported data.
	LCAROwner = LabelKey("car_owner") // Owner of the CAR; "importmgr" is us; "user" or empty is them.
)

func NewManager(ds datastore.Batching, rootDir string) *Manager {
	ds = namespace.Wrap(ds, datastore.NewKey("/stores"))
	ds = datastore.NewLogDatastore(ds, "storess")

	m := &Manager{
		ds:      ds,
		rootDir: rootDir,
		counter: shared.NewTimeCounter(),
	}

	log.Info("sanity checking imports")

	ids, err := m.List()
	if err != nil {
		log.Warnw("failed to enumerate imports on initialization", "error", err)
		return m
	}

	var broken int
	for _, id := range ids {
		log := log.With("id", id)

		info, err := m.Info(id)
		if err != nil {
			log.Warnw("failed to query metadata for import; skipping", "error", err)
			continue
		}

		log = log.With("source", info.Labels[LSource], "root", info.Labels[LRootCid], "original", info.Labels[LFileName])

		path, ok := info.Labels[LCARPath]
		if !ok {
			broken++
			log.Warnw("import lacks carv2 path; import will not work; please reimport")
			continue
		}

		stat, err := os.Stat(path)
		if err != nil {
			broken++
			log.Warnw("import has missing/broken carv2; please reimport", "error", err)
			continue
		}

		log.Infow("import ok", "size", stat.Size())
	}

	log.Infow("sanity check completed", "broken", broken, "total", len(ids))

	return m
}

type Meta struct {
	Labels map[LabelKey]LabelValue
}

// CreateImport initializes a new import, returning its ID and optionally a
// CAR path where to place the data, if requested.
func (m *Manager) CreateImport() (id ID, err error) {
	ctx := context.TODO()
	id = ID(m.counter.Next())

	meta := &Meta{Labels: map[LabelKey]LabelValue{
		LSource: "unknown",
	}}

	metajson, err := json.Marshal(meta)
	if err != nil {
		return 0, xerrors.Errorf("marshaling store metadata: %w", err)
	}

	err = m.ds.Put(ctx, id.dsKey(), metajson)
	if err != nil {
		return 0, xerrors.Errorf("failed to insert import metadata: %w", err)
	}

	return id, err
}

// AllocateCAR creates a new CAR allocated to the supplied import under the
// root directory.
func (m *Manager) AllocateCAR(id ID) (path string, err error) {
	ctx := context.TODO()
	meta, err := m.ds.Get(ctx, id.dsKey())
	if err != nil {
		return "", xerrors.Errorf("getting metadata from datastore: %w", err)
	}

	var sm Meta
	if err := json.Unmarshal(meta, &sm); err != nil {
		return "", xerrors.Errorf("unmarshaling store meta: %w", err)
	}

	// refuse if a CAR path already exists.
	if curr := sm.Labels[LCARPath]; curr != "" {
		return "", xerrors.Errorf("import CAR already exists at %s", curr)
	}

	path = filepath.Join(m.rootDir, fmt.Sprintf("%d.car", id))
	file, err := os.Create(path)
	if err != nil {
		return "", xerrors.Errorf("failed to create car file for import: %w", err)
	}

	// close the file before returning the path.
	if err := file.Close(); err != nil {
		return "", xerrors.Errorf("failed to close temp file: %w", err)
	}

	// record the path and ownership.
	sm.Labels[LCARPath] = path
	sm.Labels[LCAROwner] = CAROwnerImportMgr

	if meta, err = json.Marshal(sm); err != nil {
		return "", xerrors.Errorf("marshaling store metadata: %w", err)
	}

	err = m.ds.Put(ctx, id.dsKey(), meta)
	return path, err
}

// AddLabel adds a label associated with an import, such as the source,
// car path, CID, etc.
func (m *Manager) AddLabel(id ID, key LabelKey, value LabelValue) error {
	ctx := context.TODO()
	meta, err := m.ds.Get(ctx, id.dsKey())
	if err != nil {
		return xerrors.Errorf("getting metadata from datastore: %w", err)
	}

	var sm Meta
	if err := json.Unmarshal(meta, &sm); err != nil {
		return xerrors.Errorf("unmarshaling store meta: %w", err)
	}

	sm.Labels[key] = value

	meta, err = json.Marshal(&sm)
	if err != nil {
		return xerrors.Errorf("marshaling store meta: %w", err)
	}

	return m.ds.Put(ctx, id.dsKey(), meta)
}

// List returns all import IDs known by this Manager.
func (m *Manager) List() ([]ID, error) {
	ctx := context.TODO()
	var keys []ID

	qres, err := m.ds.Query(ctx, query.Query{KeysOnly: true})
	if err != nil {
		return nil, xerrors.Errorf("query error: %w", err)
	}
	defer qres.Close() //nolint:errcheck

	for r := range qres.Next() {
		k := r.Key
		if string(k[0]) == "/" {
			k = k[1:]
		}

		id, err := strconv.ParseUint(k, 10, 64)
		if err != nil {
			return nil, xerrors.Errorf("failed to parse key %s to uint64, err=%w", r.Key, err)
		}
		keys = append(keys, ID(id))
	}

	return keys, nil
}

// Info returns the metadata known to this store for the specified import ID.
func (m *Manager) Info(id ID) (*Meta, error) {
	ctx := context.TODO()

	meta, err := m.ds.Get(ctx, id.dsKey())
	if err != nil {
		return nil, xerrors.Errorf("getting metadata from datastore: %w", err)
	}

	var sm Meta
	if err := json.Unmarshal(meta, &sm); err != nil {
		return nil, xerrors.Errorf("unmarshaling store meta: %w", err)
	}

	return &sm, nil
}

// Remove drops all data associated with the supplied import ID.
func (m *Manager) Remove(id ID) error {
	ctx := context.TODO()
	if err := m.ds.Delete(ctx, id.dsKey()); err != nil {
		return xerrors.Errorf("removing import metadata: %w", err)
	}
	return nil
}

func (m *Manager) CARPathFor(dagRoot cid.Cid) (string, error) {
	ids, err := m.List()
	if err != nil {
		return "", xerrors.Errorf("failed to fetch import IDs: %w", err)
	}

	for _, id := range ids {
		info, err := m.Info(id)
		if err != nil {
			log.Errorf("failed to fetch info, importID=%d: %s", id, err)
			continue
		}
		if info.Labels[LRootCid] == "" {
			continue
		}
		c, err := cid.Parse(info.Labels[LRootCid])
		if err != nil {
			log.Errorf("failed to parse root cid %s: %s", info.Labels[LRootCid], err)
			continue
		}
		if c.Equals(dagRoot) {
			return info.Labels[LCARPath], nil
		}
	}

	return "", nil
}
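Taken together, the removed Manager supports a small lifecycle: create an import, allocate its backing CAR, then attach labels. A sketch of that call sequence (it only compiles against a checkout that still has node/repo/imports, since this commit deletes the package; the temp path is illustrative):

package main

import (
	"fmt"

	"github.com/ipfs/go-datastore"

	"github.com/filecoin-project/lotus/node/repo/imports"
)

func main() {
	// NewManager wraps the datastore under /stores and sanity-checks any
	// existing imports on startup.
	mgr := imports.NewManager(datastore.NewMapDatastore(), "/tmp/lotus-imports")

	id, err := mgr.CreateImport() // allocates an ID; labels the source as "unknown"
	if err != nil {
		panic(err)
	}

	carPath, err := mgr.AllocateCAR(id) // creates <rootDir>/<id>.car and records it
	if err != nil {
		panic(err)
	}

	if err := mgr.AddLabel(id, imports.LSource, "example"); err != nil {
		panic(err)
	}
	fmt.Println("import", id, "backed by", carPath)
}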
@ -13,7 +13,6 @@ import (
	logging "github.com/ipfs/go-log/v2"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/dagstore/mount"
	commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper"
	commcid "github.com/filecoin-project/go-fil-commcid"
	"github.com/filecoin-project/go-state-types/abi"
@ -435,7 +434,7 @@ func (mgr *SectorMgr) GenerateWindowPoStWithVanilla(ctx context.Context, proofTy
	panic("implement me")
}

func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) {
func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (storiface.Reader, bool, error) {
	off := storiface.UnpaddedByteIndex(0)
	var piece cid.Cid

@ -10,7 +10,6 @@ import (
	pool "github.com/libp2p/go-buffer-pool"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/dagstore/mount"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/storage/paths"
@ -29,7 +28,7 @@ type PieceProvider interface {
	// default in most cases, but this might matter with future PoRep)
	// startOffset is added to the pieceOffset to get the starting reader offset.
	// The number of bytes that can be read is pieceSize-startOffset
	ReadPiece(ctx context.Context, sector storiface.SectorRef, pieceOffset storiface.UnpaddedByteIndex, pieceSize abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error)
	ReadPiece(ctx context.Context, sector storiface.SectorRef, pieceOffset storiface.UnpaddedByteIndex, pieceSize abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (storiface.Reader, bool, error)
	IsUnsealed(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error)
}

@ -73,7 +72,7 @@ func (p *pieceProvider) IsUnsealed(ctx context.Context, sector storiface.SectorR
// It will NOT try to schedule an Unseal of a sealed sector file for the read.
//
// Returns a nil reader if the piece does NOT exist in any unsealed file or there is no unsealed file for the given sector on any of the workers.
func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, pc cid.Cid, sector storiface.SectorRef, pieceOffset storiface.UnpaddedByteIndex, pieceSize abi.UnpaddedPieceSize) (mount.Reader, error) {
func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, pc cid.Cid, sector storiface.SectorRef, pieceOffset storiface.UnpaddedByteIndex, pieceSize abi.UnpaddedPieceSize) (storiface.Reader, error) {
	// acquire a lock purely for reading unsealed sectors
	ctx, cancel := context.WithCancel(ctx)
	if err := p.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil {
@ -169,7 +168,7 @@ var _ io.Closer = funcCloser(nil)
// If we do NOT have an existing unsealed file containing the given piece thus causing us to schedule an Unseal,
// the returned boolean parameter will be set to true.
// If we have an existing unsealed file containing the given piece, the returned boolean will be set to false.
func (p *pieceProvider) ReadPiece(ctx context.Context, sector storiface.SectorRef, pieceOffset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) {
func (p *pieceProvider) ReadPiece(ctx context.Context, sector storiface.SectorRef, pieceOffset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (storiface.Reader, bool, error) {
	if err := pieceOffset.Valid(); err != nil {
		return nil, false, xerrors.Errorf("pieceOffset is not valid: %w", err)
	}
@ -224,3 +223,5 @@ func (p *pieceProvider) ReadPiece(ctx context.Context, sector storiface.SectorRe

	return r, uns, nil
}

var _ storiface.Reader = &pieceReader{}

@ -12,7 +12,6 @@ import (
	"go.opencensus.io/tag"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/dagstore/mount"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/metrics"
@ -303,5 +302,3 @@ func (p *pieceReader) readInto(b []byte, off int64) (n int, err error) {

	return n, cerr
}

var _ mount.Reader = (*pieceReader)(nil)

@ -13,6 +13,16 @@ import (

type Data = io.Reader

// Reader is a fully-featured Reader. It is the
// union of the standard IO sequential access method (Read), with seeking
// ability (Seek), as well as random access (ReadAt).
type Reader interface {
	io.Closer
	io.Reader
	io.ReaderAt
	io.Seeker
}

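For illustration (an addition of ours, not part of the diff): *os.File already provides Read, ReadAt, Seek and Close, so it satisfies this union, and any implementation can be pinned with a compile-time assertion:

package main

import (
	"io"
	"os"
)

// reader restates the storiface.Reader union locally so the assertion below
// is self-contained; *os.File implements all four embedded interfaces.
type reader interface {
	io.Closer
	io.Reader
	io.ReaderAt
	io.Seeker
}

var _ reader = (*os.File)(nil)

func main() {}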
type SectorRef struct {
	ID        abi.SectorID
	ProofType abi.RegisteredSealProof