package modules

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/google/uuid"
	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
	graphsync "github.com/ipfs/go-graphsync/impl"
	gsnet "github.com/ipfs/go-graphsync/network"
	"github.com/ipfs/go-graphsync/storeutil"
	provider "github.com/ipni/index-provider"
	"github.com/libp2p/go-libp2p/core/host"
	"go.uber.org/fx"
	"go.uber.org/multierr"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	dtimpl "github.com/filecoin-project/go-data-transfer/v2/impl"
	dtnet "github.com/filecoin-project/go-data-transfer/v2/network"
	dtgstransport "github.com/filecoin-project/go-data-transfer/v2/transport/graphsync"
	piecefilestore "github.com/filecoin-project/go-fil-markets/filestore"
	piecestoreimpl "github.com/filecoin-project/go-fil-markets/piecestore/impl"
	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl"
	rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network"
	"github.com/filecoin-project/go-fil-markets/shared"
	"github.com/filecoin-project/go-fil-markets/storagemarket"
	storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl"
	"github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask"
	smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network"
	"github.com/filecoin-project/go-jsonrpc/auth"
	"github.com/filecoin-project/go-paramfetch"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-statestore"
"github.com/filecoin-project/lotus/api"
2021-04-05 11:23:46 +00:00
"github.com/filecoin-project/lotus/api/v0api"
2021-04-05 17:56:53 +00:00
"github.com/filecoin-project/lotus/api/v1api"
2021-01-29 20:01:00 +00:00
"github.com/filecoin-project/lotus/blockstore"
2019-10-27 08:56:53 +00:00
"github.com/filecoin-project/lotus/build"
2021-01-20 02:06:00 +00:00
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
2022-08-09 11:30:34 +00:00
"github.com/filecoin-project/lotus/chain/events"
2019-11-25 04:45:13 +00:00
"github.com/filecoin-project/lotus/chain/gen"
2020-08-06 01:16:54 +00:00
"github.com/filecoin-project/lotus/chain/gen/slashfilter"
2020-03-08 08:07:58 +00:00
"github.com/filecoin-project/lotus/chain/types"
2020-09-21 22:52:33 +00:00
"github.com/filecoin-project/lotus/journal"
"github.com/filecoin-project/lotus/markets"
"github.com/filecoin-project/lotus/markets/dagstore"
2022-02-03 14:44:18 +00:00
"github.com/filecoin-project/lotus/markets/idxprov"
2020-07-31 01:27:42 +00:00
marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/lotus/markets/pricing"
2021-01-20 02:06:00 +00:00
lotusminer "github.com/filecoin-project/lotus/miner"
2020-07-23 02:05:11 +00:00
"github.com/filecoin-project/lotus/node/config"
2019-10-18 04:47:41 +00:00
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/helpers"
"github.com/filecoin-project/lotus/node/repo"
2022-06-14 17:32:29 +00:00
"github.com/filecoin-project/lotus/storage/ctladdr"
2022-06-14 18:25:52 +00:00
"github.com/filecoin-project/lotus/storage/paths"
2022-06-14 17:41:59 +00:00
sealing "github.com/filecoin-project/lotus/storage/pipeline"
"github.com/filecoin-project/lotus/storage/pipeline/sealiface"
2022-06-14 18:03:38 +00:00
"github.com/filecoin-project/lotus/storage/sealer"
2022-06-17 11:52:19 +00:00
"github.com/filecoin-project/lotus/storage/sealer/storiface"
2022-06-14 17:27:04 +00:00
"github.com/filecoin-project/lotus/storage/wdpost"
2019-08-01 14:19:53 +00:00
)
2021-09-21 14:45:00 +00:00
var (
2022-08-17 15:53:44 +00:00
StagingAreaDirName = "deal-staging"
2021-09-21 14:45:00 +00:00
)
2020-05-19 23:24:59 +00:00
2022-08-16 19:39:06 +00:00
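
// UuidWrapper wraps a v1api.FullNode and stamps a fresh UUID onto the
// MessageSendSpec of every MpoolPushMessage call before forwarding it to the
// underlying node.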
type UuidWrapper struct {
	v1api.FullNode
}

func (a *UuidWrapper) MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) {
	if spec == nil {
		spec = new(api.MessageSendSpec)
	}
	spec.MsgUuid = uuid.New()
	return a.FullNode.MpoolPushMessage(ctx, msg, spec)
}

func MakeUuidWrapper(a v1api.RawFullNodeAPI) v1api.FullNode {
	return &UuidWrapper{a}
}
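
// minerAddrFromDS reads the miner actor address stored under the
// "miner-address" key in the metadata datastore.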
func minerAddrFromDS(ds dtypes.MetadataDS) (address.Address, error) {
	maddrb, err := ds.Get(context.TODO(), datastore.NewKey("miner-address"))
	if err != nil {
		return address.Undef, err
	}
	return address.NewFromBytes(maddrb)
}
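
// GetParams returns a function that makes sure the proof parameters for the
// given seal proof type are available on disk, fetching them via
// go-paramfetch unless built-in assets are disabled. When prover is false,
// zero is passed as the proving sector size.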
func GetParams(prover bool) func(spt abi.RegisteredSealProof) error {
	return func(spt abi.RegisteredSealProof) error {
		ssize, err := spt.SectorSize()
		if err != nil {
			return err
		}

		// If built-in assets are disabled, we expect the user to have placed the right
		// parameters in the right location on the filesystem (/var/tmp/filecoin-proof-parameters).
		if build.DisableBuiltinAssets {
			return nil
		}

		var provingSize uint64
		if prover {
			provingSize = uint64(ssize)
		}

		// TODO: We should fetch the params for the actual proof type, not just based on the size.
		if err := paramfetch.GetParams(context.TODO(), build.ParametersJSON(), build.SrsJSON(), provingSize); err != nil {
			return xerrors.Errorf("fetching proof parameters: %w", err)
		}

		return nil
	}
}
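
// MinerAddress exposes the miner address from the metadata datastore as a
// dtypes.MinerAddress for dependency injection.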
func MinerAddress(ds dtypes.MetadataDS) (dtypes.MinerAddress, error) {
	ma, err := minerAddrFromDS(ds)
	return dtypes.MinerAddress(ma), err
}

func MinerID(ma dtypes.MinerAddress) (dtypes.MinerID, error) {
	id, err := address.IDFromAddress(address.Address(ma))
	return dtypes.MinerID(id), err
}
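
// StorageNetworkName resolves the network name used by the markets subsystem:
// the hardcoded "testnetnet" on non-devnet builds, otherwise the name reported
// by the full node.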
func StorageNetworkName(ctx helpers.MetricsCtx, a v1api.FullNode) (dtypes.NetworkName, error) {
	if !build.Devnet {
		return "testnetnet", nil
	}
	return a.StateNetworkName(ctx)
}
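
// SealProofType derives the miner's preferred seal proof type from its
// on-chain Window PoSt proof type and the current network version.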
func SealProofType(maddr dtypes.MinerAddress, fnapi v1api.FullNode) (abi.RegisteredSealProof, error) {
	mi, err := fnapi.StateMinerInfo(context.TODO(), address.Address(maddr), types.EmptyTSK)
	if err != nil {
		return 0, err
	}
	networkVersion, err := fnapi.StateNetworkVersion(context.TODO(), types.EmptyTSK)
	if err != nil {
		return 0, err
	}

	// node seal proof type does not decide whether or not we use synthetic porep
	return miner.PreferredSealProofTypeFromWindowPoStType(networkVersion, mi.WindowPoStProofType, false)
}
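
// AddressSelector parses the configured control addresses (precommit, commit,
// terminate and deal publish) into a ctladdr.AddressSelector.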
func AddressSelector(addrConf *config.MinerAddressConfig) func() (*ctladdr.AddressSelector, error) {
	return func() (*ctladdr.AddressSelector, error) {
		as := &ctladdr.AddressSelector{}
		if addrConf == nil {
			return as, nil
		}

		as.DisableOwnerFallback = addrConf.DisableOwnerFallback
		as.DisableWorkerFallback = addrConf.DisableWorkerFallback

		for _, s := range addrConf.PreCommitControl {
			addr, err := address.NewFromString(s)
			if err != nil {
				return nil, xerrors.Errorf("parsing precommit control address: %w", err)
			}

			as.PreCommitControl = append(as.PreCommitControl, addr)
		}

		for _, s := range addrConf.CommitControl {
			addr, err := address.NewFromString(s)
			if err != nil {
				return nil, xerrors.Errorf("parsing commit control address: %w", err)
			}

			as.CommitControl = append(as.CommitControl, addr)
		}

		for _, s := range addrConf.TerminateControl {
			addr, err := address.NewFromString(s)
			if err != nil {
				return nil, xerrors.Errorf("parsing terminate control address: %w", err)
			}

			as.TerminateControl = append(as.TerminateControl, addr)
		}

		for _, s := range addrConf.DealPublishControl {
			addr, err := address.NewFromString(s)
			if err != nil {
				return nil, xerrors.Errorf("parsing deal publishing control address: %w", err)
			}

			as.DealPublishControl = append(as.DealPublishControl, addr)
		}

		return as, nil
	}
}
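
// PreflightChecks verifies at startup that the key for the miner's worker
// address is present in the local wallet before other subsystems come up.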
func PreflightChecks(mctx helpers.MetricsCtx, lc fx.Lifecycle, api v1api.FullNode, maddr dtypes.MinerAddress) error {
	ctx := helpers.LifecycleCtx(mctx, lc)

	lc.Append(fx.Hook{OnStart: func(context.Context) error {
		mi, err := api.StateMinerInfo(ctx, address.Address(maddr), types.EmptyTSK)
		if err != nil {
			return xerrors.Errorf("failed to resolve miner info: %w", err)
		}

		workerKey, err := api.StateAccountKey(ctx, mi.Worker, types.EmptyTSK)
		if err != nil {
			return xerrors.Errorf("failed to resolve worker key: %w", err)
		}

		has, err := api.WalletHas(ctx, workerKey)
		if err != nil {
			return xerrors.Errorf("failed to check wallet for worker key: %w", err)
		}

		if !has {
			return xerrors.New("key for worker not found in local wallet")
		}

		log.Infof("starting up miner %s, worker addr %s", address.Address(maddr), workerKey)
		return nil
	}})

	return nil
}
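
// SealingPipelineParams bundles the fx-injected dependencies shared by the
// sealing pipeline and Window PoSt scheduler constructors.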
type SealingPipelineParams struct {
	fx.In

	Lifecycle          fx.Lifecycle
	MetricsCtx         helpers.MetricsCtx
	API                v1api.FullNode
	MetadataDS         dtypes.MetadataDS
	Sealer             sealer.SectorManager
	Verifier           storiface.Verifier
	Prover             storiface.Prover
	GetSealingConfigFn dtypes.GetSealingConfigFunc
	Journal            journal.Journal
	AddrSel            *ctladdr.AddressSelector
	Maddr              dtypes.MinerAddress
}
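
// SealingPipeline constructs the sealing pipeline: it subscribes to chain
// events, builds a basic precommit policy with a two-proving-period buffer,
// and runs the pipeline for the lifetime of the fx application.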
func SealingPipeline(fc config.MinerFeeConfig) func(params SealingPipelineParams) (*sealing.Sealing, error) {
	return func(params SealingPipelineParams) (*sealing.Sealing, error) {
		var (
			ds     = params.MetadataDS
			mctx   = params.MetricsCtx
			lc     = params.Lifecycle
			api    = params.API
			sealer = params.Sealer
			verif  = params.Verifier
			prover = params.Prover
			gsd    = params.GetSealingConfigFn
			j      = params.Journal
			as     = params.AddrSel
			maddr  = address.Address(params.Maddr)
		)

		ctx := helpers.LifecycleCtx(mctx, lc)

		evts, err := events.NewEvents(ctx, api)
		if err != nil {
			return nil, xerrors.Errorf("failed to subscribe to events: %w", err)
		}

		md, err := api.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
		if err != nil {
			return nil, xerrors.Errorf("getting miner info: %w", err)
		}
		provingBuffer := md.WPoStProvingPeriod * 2

		pcp := sealing.NewBasicPreCommitPolicy(api, gsd, provingBuffer)

		pipeline := sealing.New(ctx, api, fc, evts, maddr, ds, sealer, verif, prover, &pcp, gsd, j, as)

		lc.Append(fx.Hook{
			OnStart: func(context.Context) error {
				go pipeline.Run(ctx)
				return nil
			},
			OnStop: pipeline.Stop,
		})

		return pipeline, nil
	}
}
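
// WindowPostScheduler constructs the Window PoSt scheduler and runs it for
// the lifetime of the fx application.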
func WindowPostScheduler(fc config.MinerFeeConfig, pc config.ProvingConfig) func(params SealingPipelineParams) (*wdpost.WindowPoStScheduler, error) {
	return func(params SealingPipelineParams) (*wdpost.WindowPoStScheduler, error) {
		var (
			mctx   = params.MetricsCtx
			lc     = params.Lifecycle
			api    = params.API
			sealer = params.Sealer
			verif  = params.Verifier
			j      = params.Journal
			as     = params.AddrSel
			maddr  = address.Address(params.Maddr)
		)

		ctx := helpers.LifecycleCtx(mctx, lc)

		fps, err := wdpost.NewWindowedPoStScheduler(api, fc, pc, as, sealer, verif, sealer, j, maddr)
		if err != nil {
			return nil, err
		}

		lc.Append(fx.Hook{
			OnStart: func(context.Context) error {
				go fps.Run(ctx)
				return nil
			},
		})

		return fps, nil
	}
}
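
// HandleRetrieval starts the retrieval provider on startup, subscribing it to
// the event logger and the markets journal, and stops it on shutdown.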
func HandleRetrieval(host host.Host, lc fx.Lifecycle, m retrievalmarket.RetrievalProvider, j journal.Journal) {
	m.OnReady(marketevents.ReadyLogger("retrieval provider"))
	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			m.SubscribeToEvents(marketevents.RetrievalProviderLogger)

			evtType := j.RegisterEventType("markets/retrieval/provider", "state_change")
			m.SubscribeToEvents(markets.RetrievalProviderJournaler(j, evtType))

			return m.Start(ctx)
		},
		OnStop: func(context.Context) error {
			return m.Stop()
		},
	})
}
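
// HandleDeals starts the storage provider on startup, subscribing it to the
// event logger and the markets journal, and stops it on shutdown.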
func HandleDeals(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, h storagemarket.StorageProvider, j journal.Journal) {
	ctx := helpers.LifecycleCtx(mctx, lc)
	h.OnReady(marketevents.ReadyLogger("storage provider"))
	lc.Append(fx.Hook{
		OnStart: func(context.Context) error {
			h.SubscribeToEvents(marketevents.StorageProviderLogger)

			evtType := j.RegisterEventType("markets/storage/provider", "state_change")
			h.SubscribeToEvents(markets.StorageProviderJournaler(j, evtType))

			return h.Start(ctx)
		},
		OnStop: func(context.Context) error {
			return h.Stop()
		},
	})
}
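
// HandleMigrateProviderFunds migrates legacy provider funds recorded under
// "/marketfunds/provider" into a market funds reservation on startup and
// deletes the datastore key once the reservation succeeds.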
func HandleMigrateProviderFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, node api.FullNode, minerAddress dtypes.MinerAddress) {
	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			b, err := ds.Get(ctx, datastore.NewKey("/marketfunds/provider"))
			if err != nil {
				if xerrors.Is(err, datastore.ErrNotFound) {
					return nil
				}
				return err
			}

			var value abi.TokenAmount
			if err = value.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
				return err
			}
			ts, err := node.ChainHead(ctx)
			if err != nil {
				log.Errorf("provider funds migration - getting chain head: %v", err)
				return nil
			}

			mi, err := node.StateMinerInfo(ctx, address.Address(minerAddress), ts.Key())
			if err != nil {
				log.Errorf("provider funds migration - getting miner info %s: %v", minerAddress, err)
				return nil
			}

			_, err = node.MarketReserveFunds(ctx, mi.Worker, address.Address(minerAddress), value)
			if err != nil {
				log.Errorf("provider funds migration - reserving funds (wallet %s, addr %s, funds %d): %v",
					mi.Worker, minerAddress, value, err)
				return nil
			}

			return ds.Delete(ctx, datastore.NewKey("/marketfunds/provider"))
		},
	})
}

// NewProviderTransferNetwork sets up the libp2p protocol networking for data transfer
func NewProviderTransferNetwork(h host.Host) dtypes.ProviderTransferNetwork {
	return dtnet.NewFromLibp2pHost(h)
}

// NewProviderTransport sets up a data transfer transport over graphsync
func NewProviderTransport(h host.Host, gs dtypes.StagingGraphsync) dtypes.ProviderTransport {
	return dtgstransport.NewTransport(h.ID(), gs)
}

// NewProviderDataTransfer returns a data transfer manager
func NewProviderDataTransfer(lc fx.Lifecycle, net dtypes.ProviderTransferNetwork, transport dtypes.ProviderTransport, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ProviderDataTransfer, error) {
	dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/provider/transfers"))

	dt, err := dtimpl.NewDataTransfer(dtDs, net, transport)
	if err != nil {
		return nil, err
	}

	dt.OnReady(marketevents.ReadyLogger("provider data transfer"))
	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			dt.SubscribeToEvents(marketevents.DataTransferLogger)
			return dt.Start(ctx)
		},
		OnStop: func(ctx context.Context) error {
			return dt.Stop(ctx)
		},
	})
	return dt, nil
}

// NewProviderPieceStore creates a statestore for storing metadata about pieces
// shared by the storage and retrieval providers
func NewProviderPieceStore(lc fx.Lifecycle, ds dtypes.MetadataDS) (dtypes.ProviderPieceStore, error) {
	ps, err := piecestoreimpl.NewPieceStore(namespace.Wrap(ds, datastore.NewKey("/storagemarket")))
	if err != nil {
		return nil, err
	}
	ps.OnReady(marketevents.ReadyLogger("piecestore"))
	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			return ps.Start(ctx)
		},
	})
	return ps, nil
}

// StagingBlockstore creates a blockstore for staging blocks for a miner
// in a storage deal, prior to sealing
func StagingBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRepo) (dtypes.StagingBlockstore, error) {
	ctx := helpers.LifecycleCtx(mctx, lc)
	stagingds, err := r.Datastore(ctx, "/staging")
	if err != nil {
		return nil, err
	}

	return blockstore.FromDatastore(stagingds), nil
}

// StagingGraphsync creates a graphsync instance which reads and writes blocks
// to the StagingBlockstore
func StagingGraphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePerPeer uint64, parallelTransfersForRetrieval uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync {
	return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync {
		graphsyncNetwork := gsnet.NewFromLibp2pHost(h)
		lsys := storeutil.LinkSystemForBlockstore(ibs)
		gs := graphsync.New(helpers.LifecycleCtx(mctx, lc),
			graphsyncNetwork,
			lsys,
			graphsync.RejectAllRequestsByDefault(),
			graphsync.MaxInProgressIncomingRequests(parallelTransfersForRetrieval),
			graphsync.MaxInProgressIncomingRequestsPerPeer(parallelTransfersForStoragePerPeer),
			graphsync.MaxInProgressOutgoingRequests(parallelTransfersForStorage),
			graphsync.MaxLinksPerIncomingRequests(config.MaxTraversalLinks),
			graphsync.MaxLinksPerOutgoingRequests(config.MaxTraversalLinks))

		graphsyncStats(mctx, lc, gs)

		return gs
	}
}
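
// SetupBlockProducer constructs the block-producing miner for the address
// stored in the metadata datastore and starts/stops it with the fx lifecycle.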
func SetupBlockProducer(lc fx.Lifecycle, ds dtypes.MetadataDS, api v1api.FullNode, epp gen.WinningPoStProver, sf *slashfilter.SlashFilter, j journal.Journal) (*lotusminer.Miner, error) {
	minerAddr, err := minerAddrFromDS(ds)
	if err != nil {
		return nil, err
	}

	m := lotusminer.NewMiner(api, epp, minerAddr, sf, j)

	lc.Append(fx.Hook{
		OnStart: func(ctx context.Context) error {
			if err := m.Start(ctx); err != nil {
				return err
			}
			return nil
		},
		OnStop: func(ctx context.Context) error {
			return m.Stop(ctx)
		},
	})

	return m, nil
}
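
// NewStorageAsk builds the persisted storage ask store for the miner, moving
// the legacy "/latest-ask" key if present and capping the maximum piece size
// at the miner's sector size.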
func NewStorageAsk(ctx helpers.MetricsCtx, fapi v1api.FullNode, ds dtypes.MetadataDS, minerAddress dtypes.MinerAddress, spn storagemarket.StorageProviderNode) (*storedask.StoredAsk, error) {
	mi, err := fapi.StateMinerInfo(ctx, address.Address(minerAddress), types.EmptyTSK)
	if err != nil {
		return nil, err
	}

	providerDs := namespace.Wrap(ds, datastore.NewKey("/deals/provider"))
	// legacy: the ask was mistakenly stored under this key, so move the legacy key if need be
	err = shared.MoveKey(providerDs, "/latest-ask", "/storage-ask/latest")
	if err != nil {
		return nil, err
	}

	return storedask.NewStoredAsk(namespace.Wrap(providerDs, datastore.NewKey("/storage-ask")), datastore.NewKey("latest"), spn, address.Address(minerAddress),
		storagemarket.MaxPieceSize(abi.PaddedPieceSize(mi.SectorSize)))
}
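
// BasicDealFilter returns the default storage deal filter. It enforces the
// online/offline and verified/unverified deal toggles, the piece CID
// blocklist, the expected seal duration and maximum start delay, and the
// staging area size limit, before deferring to any user-supplied filter.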
func BasicDealFilter(cfg config.DealmakingConfig, user dtypes.StorageDealFilter) func(onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc,
	offlineOk dtypes.ConsiderOfflineStorageDealsConfigFunc,
	verifiedOk dtypes.ConsiderVerifiedStorageDealsConfigFunc,
	unverifiedOk dtypes.ConsiderUnverifiedStorageDealsConfigFunc,
	blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc,
	expectedSealTimeFunc dtypes.GetExpectedSealDurationFunc,
	startDelay dtypes.GetMaxDealStartDelayFunc,
	spn storagemarket.StorageProviderNode,
	r repo.LockedRepo,
) dtypes.StorageDealFilter {
	return func(onlineOk dtypes.ConsiderOnlineStorageDealsConfigFunc,
		offlineOk dtypes.ConsiderOfflineStorageDealsConfigFunc,
		verifiedOk dtypes.ConsiderVerifiedStorageDealsConfigFunc,
		unverifiedOk dtypes.ConsiderUnverifiedStorageDealsConfigFunc,
		blocklistFunc dtypes.StorageDealPieceCidBlocklistConfigFunc,
		expectedSealTimeFunc dtypes.GetExpectedSealDurationFunc,
		startDelay dtypes.GetMaxDealStartDelayFunc,
		spn storagemarket.StorageProviderNode,
		r repo.LockedRepo,
	) dtypes.StorageDealFilter {
		return func(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) {
			b, err := onlineOk()
			if err != nil {
				return false, "miner error", err
			}

			if deal.Ref != nil && deal.Ref.TransferType != storagemarket.TTManual && !b {
				log.Warnf("online storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String())
				return false, "miner is not considering online storage deals", nil
			}

			b, err = offlineOk()
			if err != nil {
				return false, "miner error", err
			}

			if deal.Ref != nil && deal.Ref.TransferType == storagemarket.TTManual && !b {
				log.Warnf("offline storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String())
				return false, "miner is not accepting offline storage deals", nil
			}

			b, err = verifiedOk()
			if err != nil {
				return false, "miner error", err
			}

			if deal.Proposal.VerifiedDeal && !b {
				log.Warnf("verified storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String())
				return false, "miner is not accepting verified storage deals", nil
			}

			b, err = unverifiedOk()
			if err != nil {
				return false, "miner error", err
			}

			if !deal.Proposal.VerifiedDeal && !b {
				log.Warnf("unverified storage deal consideration disabled; rejecting storage deal proposal from client: %s", deal.Client.String())
				return false, "miner is not accepting unverified storage deals", nil
			}

			blocklist, err := blocklistFunc()
			if err != nil {
				return false, "miner error", err
			}

			for idx := range blocklist {
				if deal.Proposal.PieceCID.Equals(blocklist[idx]) {
					log.Warnf("piece CID in proposal %s is blocklisted; rejecting storage deal proposal from client: %s", deal.Proposal.PieceCID, deal.Client.String())
					return false, fmt.Sprintf("miner has blocklisted piece CID %s", deal.Proposal.PieceCID), nil
				}
			}

			sealDuration, err := expectedSealTimeFunc()
			if err != nil {
				return false, "miner error", err
			}

			sealEpochs := sealDuration / (time.Duration(build.BlockDelaySecs) * time.Second)
			_, ht, err := spn.GetChainHead(ctx)
			if err != nil {
				return false, "failed to get chain head", err
			}
			earliest := abi.ChainEpoch(sealEpochs) + ht
			if deal.Proposal.StartEpoch < earliest {
				log.Warnw("proposed deal would start before sealing can be completed; rejecting storage deal proposal from client", "piece_cid", deal.Proposal.PieceCID, "client", deal.Client.String(), "seal_duration", sealDuration, "earliest", earliest, "curepoch", ht)
				return false, fmt.Sprintf("cannot seal a sector before %s", deal.Proposal.StartEpoch), nil
			}

			sd, err := startDelay()
			if err != nil {
				return false, "miner error", err
			}

			dir := filepath.Join(r.Path(), StagingAreaDirName)
			diskUsageBytes, err := r.DiskUsage(dir)
			if err != nil {
				return false, "miner error", err
			}

			if cfg.MaxStagingDealsBytes != 0 && diskUsageBytes >= cfg.MaxStagingDealsBytes {
				log.Errorw("proposed deal rejected because there are too many deals in the staging area at the moment", "MaxStagingDealsBytes", cfg.MaxStagingDealsBytes, "DiskUsageBytes", diskUsageBytes)
				return false, "cannot accept deal as miner is overloaded at the moment - there are too many staging deals being processed", nil
			}

			// Reject if the start epoch is further in the future than the configured max start delay
			// TODO: read from cfg
			maxStartEpoch := earliest + abi.ChainEpoch(uint64(sd.Seconds())/build.BlockDelaySecs)
			if deal.Proposal.StartEpoch > maxStartEpoch {
				return false, fmt.Sprintf("deal start epoch is too far in the future: %s > %s", deal.Proposal.StartEpoch, maxStartEpoch), nil
			}

			if user != nil {
				return user(ctx, deal)
			}

			return true, "", nil
		}
	}
}
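
// StorageProvider constructs the storage market provider, wiring together the
// stored ask, piece store, index provider, data transfer manager, DAG store
// wrapper and deal filter on top of the libp2p host.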
func StorageProvider(minerAddress dtypes.MinerAddress,
	storedAsk *storedask.StoredAsk,
	h host.Host, ds dtypes.MetadataDS,
	r repo.LockedRepo,
	pieceStore dtypes.ProviderPieceStore,
	indexer provider.Interface,
	dataTransfer dtypes.ProviderDataTransfer,
	spn storagemarket.StorageProviderNode,
	df dtypes.StorageDealFilter,
	dsw *dagstore.Wrapper,
	meshCreator idxprov.MeshCreator,
) (storagemarket.StorageProvider, error) {
	net := smnet.NewFromLibp2pHost(h)
	dir := filepath.Join(r.Path(), StagingAreaDirName)

	// Migrate temporary files that were created directly under the repo by
	// moving them into the new staging directory and symlinking them.
	oldDir := r.Path()
	if err := migrateDealStaging(oldDir, dir); err != nil {
		return nil, xerrors.Errorf("failed to make deal staging directory: %w", err)
	}

	store, err := piecefilestore.NewLocalFileStore(piecefilestore.OsPath(dir))
	if err != nil {
		return nil, err
	}

	opt := storageimpl.CustomDealDecisionLogic(storageimpl.DealDeciderFunc(df))
	return storageimpl.NewProvider(
		net,
		namespace.Wrap(ds, datastore.NewKey("/deals/provider")),
		store,
		dsw,
		indexer,
		pieceStore,
		dataTransfer,
		spn,
		address.Address(minerAddress),
		storedAsk,
		meshCreator,
		opt,
	)
}
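// Note (editorial): StorageProvider is intended to be constructed through the
// node's fx dependency-injection wiring rather than called directly. The df
// filter passed in above is plugged into the markets provider via
// storageimpl.CustomDealDecisionLogic, so a rejection returned by the filter
// becomes the provider's deal-acceptance decision.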
func RetrievalDealFilter(userFilter dtypes.RetrievalDealFilter) func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc,
	offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalDealFilter {
	return func(onlineOk dtypes.ConsiderOnlineRetrievalDealsConfigFunc,
		offlineOk dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalDealFilter {
		return func(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) {
			b, err := onlineOk()
			if err != nil {
				return false, "miner error", err
			}

			if !b {
				log.Warn("online retrieval deal consideration disabled; rejecting retrieval deal proposal from client")
				return false, "miner is not accepting online retrieval deals", nil
			}

			b, err = offlineOk()
			if err != nil {
				return false, "miner error", err
			}

			if !b {
				log.Info("offline retrieval has not been implemented yet")
			}

			if userFilter != nil {
				return userFilter(ctx, state)
			}

			return true, "", nil
		}
	}
}
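// Example (editorial sketch): the operator-supplied dtypes.RetrievalDealFilter
// is consulted last by the composed filter above. A minimal filter that only
// rejects retrievals for one specific payload CID might look like this
// (blockedPayload is a hypothetical cid.Cid held by the operator):
//
//	var blockedPayload cid.Cid
//	var userFilter dtypes.RetrievalDealFilter = func(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) {
//		if state.PayloadCID == blockedPayload {
//			return false, "retrieval of this payload is disabled", nil
//		}
//		return true, "", nil
//	}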
func RetrievalNetwork(h host.Host) rmnet.RetrievalMarketNetwork {
	return rmnet.NewFromLibp2pHost(h)
}

// RetrievalPricingFunc configures the pricing function to use for retrieval deals.
func RetrievalPricingFunc(cfg config.DealmakingConfig) func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc,
	_ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc {

	return func(_ dtypes.ConsiderOnlineRetrievalDealsConfigFunc,
		_ dtypes.ConsiderOfflineRetrievalDealsConfigFunc) dtypes.RetrievalPricingFunc {
		if cfg.RetrievalPricing.Strategy == config.RetrievalPricingExternalMode {
			return pricing.ExternalRetrievalPricingFunc(cfg.RetrievalPricing.External.Path)
		}

		return retrievalimpl.DefaultPricingFunc(cfg.RetrievalPricing.Default.VerifiedDealsFreeTransfer)
	}
}

// RetrievalProvider creates a new retrieval provider attached to the provider blockstore.
func RetrievalProvider(
	maddr dtypes.MinerAddress,
	adapter retrievalmarket.RetrievalProviderNode,
	sa retrievalmarket.SectorAccessor,
	netwk rmnet.RetrievalMarketNetwork,
	ds dtypes.MetadataDS,
	pieceStore dtypes.ProviderPieceStore,
	dt dtypes.ProviderDataTransfer,
	pricingFnc dtypes.RetrievalPricingFunc,
	userFilter dtypes.RetrievalDealFilter,
	dagStore *dagstore.Wrapper,
) (retrievalmarket.RetrievalProvider, error) {
	opt := retrievalimpl.DealDeciderOpt(retrievalimpl.DealDecider(userFilter))

	retrievalmarket.DefaultPricePerByte = big.Zero() // todo: for whatever reason this is a global var in markets
	return retrievalimpl.NewProvider(
		address.Address(maddr),
		adapter,
		sa,
		netwk,
		pieceStore,
		dagStore,
		dt,
		namespace.Wrap(ds, datastore.NewKey("/retrievals/provider")),
		retrievalimpl.RetrievalPricingFunc(pricingFnc),
		opt,
	)
}
var WorkerCallsPrefix = datastore.NewKey("/worker/calls")
var ManagerWorkPrefix = datastore.NewKey("/stmgr/calls")

func LocalStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, ls paths.LocalStorage, si paths.SectorIndex, urls paths.URLs) (*paths.Local, error) {
	ctx := helpers.LifecycleCtx(mctx, lc)
	return paths.NewLocal(ctx, ls, si, urls)
}

func RemoteStorage(lstor *paths.Local, si paths.SectorIndex, sa sealer.StorageAuth, sc config.SealerConfig) *paths.Remote {
	return paths.NewRemote(lstor, si, http.Header(sa), sc.ParallelFetchLimit, &paths.DefaultPartialFileHandler{})
}

func SectorStorage(mctx helpers.MetricsCtx, lc fx.Lifecycle, lstor *paths.Local, stor paths.Store, ls paths.LocalStorage, si paths.SectorIndex, sc config.SealerConfig, pc config.ProvingConfig, ds dtypes.MetadataDS) (*sealer.Manager, error) {
	ctx := helpers.LifecycleCtx(mctx, lc)

	wsts := statestore.New(namespace.Wrap(ds, WorkerCallsPrefix))
	smsts := statestore.New(namespace.Wrap(ds, ManagerWorkPrefix))

	sst, err := sealer.New(ctx, lstor, stor, ls, si, sc, pc, wsts, smsts)
	if err != nil {
		return nil, err
	}

	lc.Append(fx.Hook{
		OnStop: sst.Close,
	})

	return sst, nil
}

func StorageAuth(ctx helpers.MetricsCtx, ca v0api.Common) (sealer.StorageAuth, error) {
	token, err := ca.AuthNew(ctx, []auth.Permission{"admin"})
	if err != nil {
		return nil, xerrors.Errorf("creating storage auth header: %w", err)
	}

	headers := http.Header{}
	headers.Add("Authorization", "Bearer "+string(token))
	return sealer.StorageAuth(headers), nil
}
func StorageAuthWithURL(apiInfo string) func(ctx helpers.MetricsCtx, ca v0api.Common) (sealer.StorageAuth, error) {
	return func(ctx helpers.MetricsCtx, ca v0api.Common) (sealer.StorageAuth, error) {
		s := strings.Split(apiInfo, ":")
		if len(s) != 2 {
			return nil, errors.New("unexpected format of `apiInfo`")
		}
		headers := http.Header{}
		headers.Add("Authorization", "Bearer "+s[0])
		return sealer.StorageAuth(headers), nil
	}
}
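// Example (editorial, format assumed from the split above): apiInfo is
// expected to be a "TOKEN:ADDRESS" pair, e.g.
//
//	StorageAuthWithURL("eyJhbGciOiJIUzI1NiJ9...:/ip4/127.0.0.1/tcp/2345/http")
//
// Only the token part (s[0]) is used here, as the bearer token for the
// Authorization header; the address part is ignored by this constructor.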
func NewConsiderOnlineStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOnlineStorageDealsConfigFunc, error) {
	return func() (out bool, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = cfg.ConsiderOnlineStorageDeals
		})
		return
	}, nil
}

func NewSetConsideringOnlineStorageDealsFunc(r repo.LockedRepo) (dtypes.SetConsiderOnlineStorageDealsConfigFunc, error) {
	return func(b bool) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.ConsiderOnlineStorageDeals = b
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewConsiderOnlineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOnlineRetrievalDealsConfigFunc, error) {
	return func() (out bool, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = cfg.ConsiderOnlineRetrievalDeals
		})
		return
	}, nil
}

func NewSetConsiderOnlineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.SetConsiderOnlineRetrievalDealsConfigFunc, error) {
	return func(b bool) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.ConsiderOnlineRetrievalDeals = b
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewStorageDealPieceCidBlocklistConfigFunc(r repo.LockedRepo) (dtypes.StorageDealPieceCidBlocklistConfigFunc, error) {
	return func() (out []cid.Cid, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = cfg.PieceCidBlocklist
		})
		return
	}, nil
}

func NewSetStorageDealPieceCidBlocklistConfigFunc(r repo.LockedRepo) (dtypes.SetStorageDealPieceCidBlocklistConfigFunc, error) {
	return func(blocklist []cid.Cid) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.PieceCidBlocklist = blocklist
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}
func NewConsiderOfflineStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOfflineStorageDealsConfigFunc, error) {
	return func() (out bool, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = cfg.ConsiderOfflineStorageDeals
		})
		return
	}, nil
}

func NewSetConsideringOfflineStorageDealsFunc(r repo.LockedRepo) (dtypes.SetConsiderOfflineStorageDealsConfigFunc, error) {
	return func(b bool) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.ConsiderOfflineStorageDeals = b
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewConsiderOfflineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderOfflineRetrievalDealsConfigFunc, error) {
	return func() (out bool, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = cfg.ConsiderOfflineRetrievalDeals
		})
		return
	}, nil
}

func NewSetConsiderOfflineRetrievalDealsConfigFunc(r repo.LockedRepo) (dtypes.SetConsiderOfflineRetrievalDealsConfigFunc, error) {
	return func(b bool) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.ConsiderOfflineRetrievalDeals = b
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewConsiderVerifiedStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderVerifiedStorageDealsConfigFunc, error) {
	return func() (out bool, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = cfg.ConsiderVerifiedStorageDeals
		})
		return
	}, nil
}

func NewSetConsideringVerifiedStorageDealsFunc(r repo.LockedRepo) (dtypes.SetConsiderVerifiedStorageDealsConfigFunc, error) {
	return func(b bool) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.ConsiderVerifiedStorageDeals = b
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewConsiderUnverifiedStorageDealsConfigFunc(r repo.LockedRepo) (dtypes.ConsiderUnverifiedStorageDealsConfigFunc, error) {
	return func() (out bool, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = cfg.ConsiderUnverifiedStorageDeals
		})
		return
	}, nil
}

func NewSetConsideringUnverifiedStorageDealsFunc(r repo.LockedRepo) (dtypes.SetConsiderUnverifiedStorageDealsConfigFunc, error) {
	return func(b bool) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.ConsiderUnverifiedStorageDeals = b
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}
func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error) {
	return func(cfg sealiface.Config) (err error) {
		err = mutateSealingCfg(r, func(c config.SealingConfiger) {
			newCfg := config.SealingConfig{
				MaxWaitDealsSectors:             cfg.MaxWaitDealsSectors,
				MaxSealingSectors:               cfg.MaxSealingSectors,
				MaxSealingSectorsForDeals:       cfg.MaxSealingSectorsForDeals,
				PreferNewSectorsForDeals:        cfg.PreferNewSectorsForDeals,
				MaxUpgradingSectors:             cfg.MaxUpgradingSectors,
				CommittedCapacitySectorLifetime: config.Duration(cfg.CommittedCapacitySectorLifetime),
				WaitDealsDelay:                  config.Duration(cfg.WaitDealsDelay),
				MakeNewSectorForDeals:           cfg.MakeNewSectorForDeals,
				MinUpgradeSectorExpiration:      cfg.MinUpgradeSectorExpiration,
				MakeCCSectorsAvailable:          cfg.MakeCCSectorsAvailable,
				AlwaysKeepUnsealedCopy:          cfg.AlwaysKeepUnsealedCopy,
				FinalizeEarly:                   cfg.FinalizeEarly,

				CollateralFromMinerBalance: cfg.CollateralFromMinerBalance,
				AvailableBalanceBuffer:     types.FIL(cfg.AvailableBalanceBuffer),
				DisableCollateralFallback:  cfg.DisableCollateralFallback,

				BatchPreCommits:     cfg.BatchPreCommits,
				MaxPreCommitBatch:   cfg.MaxPreCommitBatch,
				PreCommitBatchWait:  config.Duration(cfg.PreCommitBatchWait),
				PreCommitBatchSlack: config.Duration(cfg.PreCommitBatchSlack),

				AggregateCommits:           cfg.AggregateCommits,
				MinCommitBatch:             cfg.MinCommitBatch,
				MaxCommitBatch:             cfg.MaxCommitBatch,
				CommitBatchWait:            config.Duration(cfg.CommitBatchWait),
				CommitBatchSlack:           config.Duration(cfg.CommitBatchSlack),
				AggregateAboveBaseFee:      types.FIL(cfg.AggregateAboveBaseFee),
				BatchPreCommitAboveBaseFee: types.FIL(cfg.BatchPreCommitAboveBaseFee),

				TerminateBatchMax:                      cfg.TerminateBatchMax,
				TerminateBatchMin:                      cfg.TerminateBatchMin,
				TerminateBatchWait:                     config.Duration(cfg.TerminateBatchWait),
				MaxSectorProveCommitsSubmittedPerEpoch: cfg.MaxSectorProveCommitsSubmittedPerEpoch,
				UseSyntheticPoRep:                      cfg.UseSyntheticPoRep,
			}
			c.SetSealingConfig(newCfg)
		})
		return
	}, nil
}
func ToSealingConfig(dealmakingCfg config.DealmakingConfig, sealingCfg config.SealingConfig) sealiface.Config {
	return sealiface.Config{
		MaxWaitDealsSectors:        sealingCfg.MaxWaitDealsSectors,
		MaxSealingSectors:          sealingCfg.MaxSealingSectors,
		MaxSealingSectorsForDeals:  sealingCfg.MaxSealingSectorsForDeals,
		PreferNewSectorsForDeals:   sealingCfg.PreferNewSectorsForDeals,
		MinUpgradeSectorExpiration: sealingCfg.MinUpgradeSectorExpiration,
		MaxUpgradingSectors:        sealingCfg.MaxUpgradingSectors,

		StartEpochSealingBuffer:         abi.ChainEpoch(dealmakingCfg.StartEpochSealingBuffer),
		MakeNewSectorForDeals:           sealingCfg.MakeNewSectorForDeals,
		CommittedCapacitySectorLifetime: time.Duration(sealingCfg.CommittedCapacitySectorLifetime),
		WaitDealsDelay:                  time.Duration(sealingCfg.WaitDealsDelay),
		MakeCCSectorsAvailable:          sealingCfg.MakeCCSectorsAvailable,
		AlwaysKeepUnsealedCopy:          sealingCfg.AlwaysKeepUnsealedCopy,
		FinalizeEarly:                   sealingCfg.FinalizeEarly,
		CollateralFromMinerBalance:      sealingCfg.CollateralFromMinerBalance,
		AvailableBalanceBuffer:          types.BigInt(sealingCfg.AvailableBalanceBuffer),
		DisableCollateralFallback:       sealingCfg.DisableCollateralFallback,
		BatchPreCommits:                 sealingCfg.BatchPreCommits,
		MaxPreCommitBatch:               sealingCfg.MaxPreCommitBatch,
		PreCommitBatchWait:              time.Duration(sealingCfg.PreCommitBatchWait),
		PreCommitBatchSlack:             time.Duration(sealingCfg.PreCommitBatchSlack),

		AggregateCommits:                       sealingCfg.AggregateCommits,
		MinCommitBatch:                         sealingCfg.MinCommitBatch,
		MaxCommitBatch:                         sealingCfg.MaxCommitBatch,
		CommitBatchWait:                        time.Duration(sealingCfg.CommitBatchWait),
		CommitBatchSlack:                       time.Duration(sealingCfg.CommitBatchSlack),
		AggregateAboveBaseFee:                  types.BigInt(sealingCfg.AggregateAboveBaseFee),
		BatchPreCommitAboveBaseFee:             types.BigInt(sealingCfg.BatchPreCommitAboveBaseFee),
		MaxSectorProveCommitsSubmittedPerEpoch: sealingCfg.MaxSectorProveCommitsSubmittedPerEpoch,

		TerminateBatchMax:  sealingCfg.TerminateBatchMax,
		TerminateBatchMin:  sealingCfg.TerminateBatchMin,
		TerminateBatchWait: time.Duration(sealingCfg.TerminateBatchWait),
		UseSyntheticPoRep:  sealingCfg.UseSyntheticPoRep,
	}
}
func NewGetSealConfigFunc(r repo.LockedRepo) (dtypes.GetSealingConfigFunc, error) {
	return func() (out sealiface.Config, err error) {
		err = readSealingCfg(r, func(dc config.DealmakingConfiger, sc config.SealingConfiger) {
			scfg := sc.GetSealingConfig()
			dcfg := dc.GetDealmakingConfig()
			out = ToSealingConfig(dcfg, scfg)
		})
		return
	}, nil
}

func NewSetExpectedSealDurationFunc(r repo.LockedRepo) (dtypes.SetExpectedSealDurationFunc, error) {
	return func(delay time.Duration) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.ExpectedSealDuration = config.Duration(delay)
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewGetExpectedSealDurationFunc(r repo.LockedRepo) (dtypes.GetExpectedSealDurationFunc, error) {
	return func() (out time.Duration, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = time.Duration(cfg.ExpectedSealDuration)
		})
		return
	}, nil
}

func NewSetMaxDealStartDelayFunc(r repo.LockedRepo) (dtypes.SetMaxDealStartDelayFunc, error) {
	return func(delay time.Duration) (err error) {
		err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			cfg.MaxDealStartDelay = config.Duration(delay)
			c.SetDealmakingConfig(cfg)
		})
		return
	}, nil
}

func NewGetMaxDealStartDelayFunc(r repo.LockedRepo) (dtypes.GetMaxDealStartDelayFunc, error) {
	return func() (out time.Duration, err error) {
		err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
			cfg := c.GetDealmakingConfig()
			out = time.Duration(cfg.MaxDealStartDelay)
		})
		return
	}, nil
}
func readSealingCfg(r repo.LockedRepo, accessor func(config.DealmakingConfiger, config.SealingConfiger)) error {
	raw, err := r.Config()
	if err != nil {
		return err
	}

	scfg, ok := raw.(config.SealingConfiger)
	if !ok {
		return xerrors.New("expected config with sealing config trait")
	}

	dcfg, ok := raw.(config.DealmakingConfiger)
	if !ok {
		return xerrors.New("expected config with dealmaking config trait")
	}

	accessor(dcfg, scfg)

	return nil
}

func mutateSealingCfg(r repo.LockedRepo, mutator func(config.SealingConfiger)) error {
	var typeErr error

	setConfigErr := r.SetConfig(func(raw interface{}) {
		cfg, ok := raw.(config.SealingConfiger)
		if !ok {
			typeErr = errors.New("expected config with sealing config trait")
			return
		}

		mutator(cfg)
	})

	return multierr.Combine(typeErr, setConfigErr)
}

func readDealmakingCfg(r repo.LockedRepo, accessor func(config.DealmakingConfiger)) error {
	raw, err := r.Config()
	if err != nil {
		return err
	}

	cfg, ok := raw.(config.DealmakingConfiger)
	if !ok {
		return xerrors.New("expected config with dealmaking config trait")
	}

	accessor(cfg)

	return nil
}

func mutateDealmakingCfg(r repo.LockedRepo, mutator func(config.DealmakingConfiger)) error {
	var typeErr error

	setConfigErr := r.SetConfig(func(raw interface{}) {
		cfg, ok := raw.(config.DealmakingConfiger)
		if !ok {
			typeErr = errors.New("expected config with dealmaking config trait")
			return
		}

		mutator(cfg)
	})

	return multierr.Combine(typeErr, setConfigErr)
}
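// Example (editorial sketch): new getter/setter pairs for additional
// Dealmaking fields are expected to follow the same read/mutate pattern as
// the functions above. For a hypothetical boolean field SomeFlag it would
// look roughly like:
//
//	func NewGetSomeFlagFunc(r repo.LockedRepo) (func() (bool, error), error) {
//		return func() (out bool, err error) {
//			err = readDealmakingCfg(r, func(c config.DealmakingConfiger) {
//				out = c.GetDealmakingConfig().SomeFlag // hypothetical field
//			})
//			return
//		}, nil
//	}
//
//	func NewSetSomeFlagFunc(r repo.LockedRepo) (func(bool) error, error) {
//		return func(b bool) (err error) {
//			err = mutateDealmakingCfg(r, func(c config.DealmakingConfiger) {
//				cfg := c.GetDealmakingConfig()
//				cfg.SomeFlag = b // hypothetical field
//				c.SetDealmakingConfig(cfg)
//			})
//			return
//		}, nil
//	}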
func migrateDealStaging(oldPath, newPath string) error {
	dirInfo, err := os.Stat(newPath)
	if err == nil {
		if !dirInfo.IsDir() {
			return xerrors.Errorf("%s is not a directory", newPath)
		}
		// newPath already exists, so the migration below has already occurred.
		return nil
	}

	// If the directory doesn't exist, create it.
	if os.IsNotExist(err) {
		if err := os.MkdirAll(newPath, 0755); err != nil {
			return xerrors.Errorf("failed to make directory %s for deal staging: %w", newPath, err)
		}
	} else { // if we failed for other reasons, abort.
		return err
	}

	// This is the first time the directory has been created, so migrate all
	// existing staged deals into it. Start with a list of files in the miner repo.
	dirEntries, err := os.ReadDir(oldPath)
	if err != nil {
		return xerrors.Errorf("failed to list directory %s for deal staging: %w", oldPath, err)
	}

	for _, entry := range dirEntries {
		// Ignore directories; they are not the deals.
		if entry.IsDir() {
			continue
		}
		// The FileStore from go-fil-markets creates temporary staged deal files with the pattern "fstmp":
		// https://github.com/filecoin-project/go-fil-markets/blob/00ff81e477d846ac0cb58a0c7d1c2e9afb5ee1db/filestore/filestore.go#L69
		name := entry.Name()
		if strings.Contains(name, "fstmp") {
			// Move the file from the miner repo into its "deal-staging" subdirectory,
			// then leave a symbolic link at the old location so existing staged deals
			// keep working. All future staged deals are created in the new directory.
			oldPath := filepath.Join(oldPath, name)
			newPath := filepath.Join(newPath, name)
			if err := os.Rename(oldPath, newPath); err != nil {
				return xerrors.Errorf("failed to move %s to %s: %w", oldPath, newPath, err)
			}
			if err := os.Symlink(newPath, oldPath); err != nil {
				return xerrors.Errorf("failed to symlink %s to %s: %w", oldPath, newPath, err)
			}
			log.Infow("symlinked staged deal", "from", oldPath, "to", newPath)
		}
	}

	return nil
}
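// Example (editorial, illustrative layout): given a staged deal file created
// directly under the miner repo, e.g.
//
//	<miner-repo>/fstmp123456789
//
// the migration moves it to
//
//	<miner-repo>/deal-staging/fstmp123456789
//
// and leaves a symlink at the old path pointing at the new location, so
// references recorded before the migration keep resolving.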
func ExtractEnabledMinerSubsystems(cfg config.MinerSubsystemConfig) (res api.MinerSubsystems) {
	if cfg.EnableMining {
		res = append(res, api.SubsystemMining)
	}
	if cfg.EnableSealing {
		res = append(res, api.SubsystemSealing)
	}
	if cfg.EnableSectorStorage {
		res = append(res, api.SubsystemSectorStorage)
	}
	if cfg.EnableMarkets {
		res = append(res, api.SubsystemMarkets)
	}

	return res
}
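// Example (editorial, illustrative): for a config that only enables the
// markets subsystem, the result contains a single entry:
//
//	subs := ExtractEnabledMinerSubsystems(config.MinerSubsystemConfig{EnableMarkets: true})
//	// subs == api.MinerSubsystems{api.SubsystemMarkets}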